text
stringlengths
81
112k
def files(self):
    """
    Return the list of available files in the coverage report.

    File names appear in first-seen order, without duplicates.
    """
    # maybe replace with a trie at some point? see has_file FIXME
    seen = set()
    names = []
    for node in self.xml.xpath("//class"):
        name = node.attrib['filename']
        if name not in seen:
            seen.add(name)
            names.append(name)
    return names
def source_lines(self, filename):
    """
    Return the list of source lines of file `filename`.
    """
    with self.filesystem.open(filename) as handle:
        return handle.readlines()
def has_better_coverage(self):
    """
    Return `True` if coverage has improved, `False` otherwise.

    This does not ensure that all changes have been covered. If this
    is what you want, use `CoberturaDiff.has_all_changes_covered()`
    instead.
    """
    # Coverage got worse iff any file gained missed lines.
    return all(
        self.diff_total_misses(name) <= 0 for name in self.files()
    )
def has_all_changes_covered(self):
    """
    Return `True` if all changes have been covered, `False` otherwise.
    """
    for name in self.files():
        for chunk in self.file_source_hunks(name):
            # Lines with no `reason` were untouched by the change set.
            touched = (ln for ln in chunk if ln.reason is not None)
            if any(ln.status is False for ln in touched):
                return False  # at least one changed line is uncovered
    return True
def _diff_attr(self, attr_name, filename):
    """
    Return the difference between `self.cobertura2.<attr_name>(filename)`
    and `self.cobertura1.<attr_name>(filename)`.

    This generic method is meant to diff the count of methods that
    return counts for a given file `filename`, e.g.
    `Cobertura.total_statements`, `Cobertura.total_misses`, ...

    The returned count may be a float.
    """
    targets = [filename] if filename is not None else self.files()

    delta = 0.0
    for name in targets:
        # Files new in report 2 contribute their full count.
        if self.cobertura1.has_file(name):
            before = getattr(self.cobertura1, attr_name)(name)
        else:
            before = 0.0
        after = getattr(self.cobertura2, attr_name)(name)
        delta += after - before

    return delta
def diff_missed_lines(self, filename):
    """
    Return a list of 2-element tuples `(lineno, is_new)` for the given
    file `filename` where `lineno` is a missed line number and `is_new`
    indicates whether the missed line was introduced (True) or removed
    (False).
    """
    # `status` is None for lines whose coverage did not change.
    return [
        (line.number, not line.status)
        for line in self.file_source(filename)
        if line.status is not None
    ]
def file_source(self, filename):
    """
    Return a list of namedtuple `Line` for each line of code found in
    the given file `filename`.

    Each `Line` carries `(number, source, status, reason)` where
    `status` is True (coverage increased), False (decreased) or None
    (unchanged), and `reason` is one of 'line-edit', 'cov-up',
    'cov-down' or None.
    """
    # Only load the "before" side if the file exists in both the first
    # report and on disk; otherwise diff against nothing.
    if self.cobertura1.has_file(filename) and \
            self.cobertura1.filesystem.has_file(filename):
        lines1 = self.cobertura1.source_lines(filename)
        line_statuses1 = dict(self.cobertura1.line_statuses(filename))
    else:
        lines1 = []
        line_statuses1 = {}

    lines2 = self.cobertura2.source_lines(filename)
    line_statuses2 = dict(self.cobertura2.line_statuses(filename))

    # Build a dict of lineno2 -> lineno1
    lineno_map = reconcile_lines(lines2, lines1)

    lines = []
    for lineno, source in enumerate(lines2, start=1):
        status = None
        reason = None
        if lineno not in lineno_map:
            # line was added or removed, just use whatever coverage status
            # is available as there is nothing to compare against.
            status = line_statuses2.get(lineno)
            reason = 'line-edit'
        else:
            other_lineno = lineno_map[lineno]
            line_status1 = line_statuses1.get(other_lineno)
            line_status2 = line_statuses2.get(lineno)
            if line_status1 is line_status2:
                status = None  # unchanged
                reason = None
            elif line_status1 is True and line_status2 is False:
                status = False  # decreased
                reason = 'cov-down'
            elif line_status1 is False and line_status2 is True:
                status = True  # increased
                reason = 'cov-up'

        line = Line(lineno, source, status, reason)
        lines.append(line)

    return lines
def file_source_hunks(self, filename):
    """
    Like `CoberturaDiff.file_source`, but returns a list of line hunks
    of the lines that have changed for the given file `filename`. An
    empty list means that the file has no lines that have a change in
    coverage status.
    """
    return hunkify_lines(self.file_source(filename))
def monitor(self):
    """Flushes the queue periodically."""
    # Runs until close() clears `monitor_running`.
    while self.monitor_running.is_set():
        # Flush when more than `batch_time` seconds passed since the
        # last flush and there is something queued.
        if time.time() - self.last_flush > self.batch_time:
            if not self.queue.empty():
                logger.info("Queue Flush: time without flush exceeded")
                self.flush_queue()
        time.sleep(self.batch_time)
def put_records(self, records, partition_key=None):
    """Add a list of data records to the record queue in the proper format.

    Convenience method that calls self.put_record for each element.

    Parameters
    ----------
    records : list
        Lists of records to send.
    partition_key: str
        Hash that determines which shard a given data record belongs to.
    """
    for item in records:
        self.put_record(item, partition_key)
def put_record(self, data, partition_key=None):
    """Add data to the record queue in the proper format.

    Parameters
    ----------
    data : str
        Data to send.
    partition_key: str
        Hash that determines which shard a given data record belongs to.
    """
    # Byte encode the data and pick a random partition key when the
    # caller did not supply one.
    payload = encode_data(data)
    partition_key = partition_key or uuid.uuid4().hex

    record = {
        'Data': payload,
        'PartitionKey': partition_key
    }

    # Flush the queue if it reaches the batch size
    if self.queue.qsize() >= self.batch_size:
        logger.info("Queue Flush: batch size reached")
        self.pool.submit(self.flush_queue)

    # Append the record
    logger.debug('Putting record "{}"'.format(record['Data'][:100]))
    self.queue.put(record)
def close(self):
    """Flushes the queue and waits for the executor to finish."""
    logger.info('Closing producer')
    # Drain pending records first, then stop the monitor thread so it
    # does not trigger another flush, then wait for in-flight sends.
    self.flush_queue()
    self.monitor_running.clear()
    self.pool.shutdown()
    logger.info('Producer closed')
def flush_queue(self):
    """Grab all the current records in the queue and send them."""
    # Drain at most one batch worth of records.
    batch = []
    while not self.queue.empty() and len(batch) < self.batch_size:
        batch.append(self.queue.get())

    if batch:
        self.send_records(batch)
        self.last_flush = time.time()
def send_records(self, records, attempt=0):
    """Send records to the Kinesis stream.

    Failed records are sent again with an exponential backoff decay.

    Parameters
    ----------
    records : array
        Array of formatted records to send.
    attempt: int
        Number of times the records have been sent without success.
    """
    # If we already tried more times than we wanted, save to a file
    if attempt > self.max_retries:
        logger.warning('Writing {} records to file'.format(len(records)))
        # NOTE(review): record payloads are appended back-to-back with
        # no delimiter; confirm downstream consumers can parse this DLQ.
        with open('failed_records.dlq', 'ab') as f:
            for r in records:
                f.write(r.get('Data'))
        return

    # Sleep before retrying (exponential backoff: 0.2s, 0.4s, 0.8s, ...)
    if attempt:
        time.sleep(2 ** attempt * .1)

    response = self.kinesis_client.put_records(StreamName=self.stream_name,
                                               Records=records)
    failed_record_count = response['FailedRecordCount']

    # Grab failed records; Kinesis returns per-record results in the
    # same order as the request, so index i maps back to records[i].
    if failed_record_count:
        logger.warning('Retrying failed records')
        failed_records = []
        for i, record in enumerate(response['Records']):
            if record.get('ErrorCode'):
                failed_records.append(records[i])

        # Recursive call
        attempt += 1
        self.send_records(failed_records, attempt=attempt)
def rangify(number_list):
    """Collapse a sorted list of ints into inclusive (start, end) ranges.

    Assumes the list is sorted.
    """
    if not number_list:
        return number_list

    ranges = []
    start = end = number_list[0]
    for value in number_list[1:]:
        if value == end + 1:
            # still contiguous, extend the current range
            end = value
        else:
            ranges.append((start, end))
            start = end = value
    ranges.append((start, end))

    return ranges
def extrapolate_coverage(lines_w_status):
    """
    Fill gaps between known line numbers with extrapolated statuses.

    A missing line between two neighbors that share the same status
    inherits that status; a line between neighbors with differing
    statuses gets `None`.

    >>> extrapolate_coverage([(1, True), (4, True), (7, False), (9, False)])
    ... # -> [(1, True), (2, True), (3, True), (4, True),
    ... #     (5, None), (6, None), (7, False), (8, False), (9, False)]
    """
    filled = []
    prev_no, prev_st = 0, True
    for cur_no, cur_st in lines_w_status:
        gap_status = cur_st if prev_st is cur_st else None
        for missing in range(prev_no + 1, cur_no):
            filled.append((missing, gap_status))
        filled.append((cur_no, cur_st))
        prev_no, prev_st = cur_no, cur_st
    return filled
def reconcile_lines(lines1, lines2):
    """
    Return a dict `{lineno1: lineno2}` which reconciles line numbers
    `lineno1` of list `lines1` to line numbers `lineno2` of list
    `lines2`. Only lines that are common in both sets are present in
    the dict, lines unique to one of the sets are omitted.
    """
    differ = difflib.Differ()
    diff = differ.compare(lines1, lines2)

    # Two-character prefixes emitted by difflib.Differ.
    SAME = '  '
    ADDED = '+ '
    REMOVED = '- '
    INFO = '? '

    lineno_map = {}  # {lineno1: lineno2, ...}
    lineno1_offset = 0
    lineno2 = 1

    for diffline in diff:
        # '? ' lines are annotations about the previous line, not content.
        if diffline.startswith(INFO):
            continue
        if diffline.startswith(SAME):
            # Line common to both sides: record the mapping.
            lineno1 = lineno2 + lineno1_offset
            lineno_map[lineno1] = lineno2
        elif diffline.startswith(ADDED):
            # Line only in lines2: lines1 numbering falls behind.
            lineno1_offset -= 1
        elif diffline.startswith(REMOVED):
            # Line only in lines1: consumes a lineno1 but no lineno2,
            # so do not advance lineno2.
            lineno1_offset += 1
            continue
        lineno2 += 1

    return lineno_map
def hunkify_lines(lines, context=3):
    """
    Return a list of line hunks given a list of lines `lines`. The
    number of context lines can be control with `context` which will
    return line hunks surrounded with `context` lines before and after
    the code change.
    """
    # Find contiguous line changes (runs where line.status is not None).
    ranges = []
    range_start = None
    for i, line in enumerate(lines):
        if line.status is not None:
            if range_start is None:
                range_start = i
            continue
        elif range_start is not None:
            range_stop = i
            ranges.append((range_start, range_stop))
            range_start = None
    else:
        # Append the last range (change run extends to end of file)
        if range_start is not None:
            range_stop = i
            ranges.append((range_start, range_stop))

    # add context
    ranges_w_context = []
    for range_start, range_stop in ranges:
        range_start = range_start - context
        range_start = range_start if range_start >= 0 else 0
        range_stop = range_stop + context
        ranges_w_context.append((range_start, range_stop))

    # merge overlapping hunks
    merged_ranges = ranges_w_context[:1]
    for range_start, range_stop in ranges_w_context[1:]:
        prev_start, prev_stop = merged_ranges[-1]
        if range_start <= prev_stop:
            # overlaps the previous hunk: extend it instead of adding one
            range_start = prev_start
            merged_ranges[-1] = (range_start, range_stop)
        else:
            merged_ranges.append((range_start, range_stop))

    # build final hunks
    hunks = []
    for range_start, range_stop in merged_ranges:
        hunk = lines[range_start:range_stop]
        hunks.append(hunk)

    return hunks
def show(cobertura_file, format, output, source, source_prefix):
    """show coverage summary of a Cobertura report"""
    cobertura = Cobertura(cobertura_file, source=source)

    # Render the report with the reporter registered for `format`.
    reporter = reporters[format](cobertura)
    report = reporter.generate()
    if not isinstance(report, bytes):
        report = report.encode('utf-8')

    # Only append a trailing newline when writing to a terminal.
    isatty = output.isatty() if output is not None else True
    click.echo(report, file=output, nl=isatty)
def diff(
        cobertura_file1, cobertura_file2,
        color, format, output,
        source1, source2,
        source_prefix1, source_prefix2,
        source):
    """compare coverage of two Cobertura reports"""
    cobertura1 = Cobertura(
        cobertura_file1,
        source=source1,
        source_prefix=source_prefix1
    )
    cobertura2 = Cobertura(
        cobertura_file2,
        source=source2,
        source_prefix=source_prefix2
    )

    Reporter = delta_reporters[format]
    reporter_args = [cobertura1, cobertura2]
    reporter_kwargs = {'show_source': source}

    isatty = True if output is None else output.isatty()

    # Color only applies to the text reporter; default to coloring when
    # writing to a terminal unless the caller forced it on/off.
    if format == 'text':
        color = isatty if color is None else color is True
        reporter_kwargs['color'] = color

    reporter = Reporter(*reporter_args, **reporter_kwargs)
    report = reporter.generate()

    if not isinstance(report, bytes):
        report = report.encode('utf-8')

    click.echo(report, file=output, nl=isatty, color=color)

    # Exit code reflects whether coverage regressed (CI-friendly).
    exit_code = get_exit_code(reporter.differ, source)
    raise SystemExit(exit_code)
def open(self, filename):
    """
    Yield a file-like object for file `filename`.

    This function is a context manager.

    Raises `self.FileNotFound` when the resolved path does not exist.
    """
    # NOTE(review): yields from a `with`, so this is presumably
    # decorated with @contextlib.contextmanager — decorator not visible
    # here, confirm at the definition site.
    filename = self.real_filename(filename)

    if not os.path.exists(filename):
        raise self.FileNotFound(filename)

    with codecs.open(filename, encoding='utf-8') as f:
        yield f
def topic_inject(self, topic_name, _msg_content=None, **kwargs):
    """
    Inject a message into a topic.
    If `_msg_content` is given, we inject it directly.
    If not, we use all extra kwargs.
    :param topic_name: name of the topic
    :param _msg_content: optional message content
    :param kwargs: each extra kwarg will be put into the message if structure matches
    :return: True if the injection succeeded (service returned None)
    """
    # changing unicode to string ( testing stability of multiprocess debugging )
    # NOTE(review): `unicode` only exists on Python 2 — this code path
    # is Python 2 specific.
    if isinstance(topic_name, unicode):
        topic_name = unicodedata.normalize('NFKD', topic_name).encode('ascii', 'ignore')

    if _msg_content is not None:
        # logging.warn("injecting {msg} into {topic}".format(msg=_msg_content, topic=topic_name))
        res = self.topic_svc.call(args=(topic_name, _msg_content,))
    else:  # default kwargs is {}
        # logging.warn("injecting {msg} into {topic}".format(msg=kwargs, topic=topic_name))
        res = self.topic_svc.call(args=(topic_name, kwargs,))
    return res is None
def param_set(self, param_name, _value=None, **kwargs):
    """
    Set a parameter.
    If `_value` is given, we inject it directly.
    If not, we use all extra kwargs.
    :param param_name: name of the parameter
    :param _value: optional value
    :param kwargs: each extra kwarg will be put in the value if structure matches
    :return: True if the set succeeded (service returned None)
    """
    # changing unicode to string ( testing stability of multiprocess debugging )
    # NOTE(review): `unicode` only exists on Python 2.
    if isinstance(param_name, unicode):
        param_name = unicodedata.normalize('NFKD', param_name).encode('ascii', 'ignore')

    # NOTE(review): after this line `_value` can never be None, so the
    # final `else` branch ('WRONG SET') below is unreachable — when
    # neither `_value` nor kwargs are given, the service is called with
    # an empty dict instead of rejecting the request. Confirm intent.
    _value = _value or {}

    if kwargs:
        res = self.param_svc.call(args=(param_name, kwargs,))
    elif _value is not None:
        res = self.param_svc.call(args=(param_name, _value,))
    else:
        # if _msg_content is None the request is invalid.
        # just return something to mean False.
        res = 'WRONG SET'
    return res is None
def run(self):
    """Commit the release metadata, push it, and print publish instructions.

    Side effects: runs `git commit` and `git push` in the current
    working directory, then exits the interpreter via `sys.exit()`.
    """
    # change version in code and changelog before running this
    # BUGFIX: CHANGELOG.rst was listed twice in the commit file list;
    # the duplicate is removed.
    subprocess.check_call(
        "git commit CHANGELOG.rst pyros/_version.py -m 'v{0}'".format(__version__),
        shell=True)
    subprocess.check_call("git push", shell=True)

    print("You should verify travis checks, and you can publish this release with :")
    print(" python setup.py publish")
    sys.exit()
def run(interface, config, logfile, ros_args):
    """
    Start a pyros node.
    :param interface: the interface implementation (ROS, Mock, ZMP, etc.)
    :param config: the config file path, absolute, or relative to working directory
    :param logfile: the logfile path, absolute, or relative to working directory
    :param ros_args: the ros arguments (useful to absorb additional args when launched with roslaunch)
    """
    logging.info(
        'pyros started with : interface {interface} config {config} logfile {logfile} ros_args {ros_args}'.format(
            interface=interface, config=config, logfile=logfile, ros_args=ros_args))

    # Only the ROS interface is implemented; other values leave
    # node_proc as None and the start() call below will fail.
    if interface == 'ros':
        node_proc = pyros_rosinterface_launch(node_name='pyros_rosinterface', pyros_config=config, ros_argv=ros_args)
    else:
        node_proc = None  # NOT IMPLEMENTED

    # node_proc.daemon = True  # we do NOT want a daemon(would stop when this main process exits...)
    client_conn = node_proc.start()
def nickmask(prefix: str, kwargs: Dict[str, Any]) -> None:
    """ store nick, user, host in kwargs if prefix is correct format """
    if "!" not in prefix or "@" not in prefix:
        # From a server, probably the host
        kwargs["host"] = prefix
        return
    # From a user: nick!user@host
    kwargs["nick"], remainder = prefix.split("!", 1)
    kwargs["user"], kwargs["host"] = remainder.split("@", 1)
def split_line(msg: str) -> Tuple[str, str, List[str]]:
    """ Parse message according to rfc 2812 for routing """
    match = RE_IRCLINE.match(msg)
    if match is None:
        raise ValueError("Invalid line")

    prefix = match.group("prefix") or ""
    command = match.group("command")
    params = (match.group("params") or "").split()

    # The trailing field (after ':') may contain spaces; append whole.
    trailing = match.group("message") or ""
    if trailing:
        params = params + [trailing]

    return prefix, command, params
def b(field: str, kwargs: Dict[str, Any],
      present: Optional[Any] = None, missing: Any = '') -> str:
    """
    Return `present` value (default to `field`) if `field` in `kwargs`
    and Truthy, otherwise return `missing` value
    """
    if not kwargs.get(field):
        return str(missing)
    return field if present is None else str(present)
def f(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None) -> str:
    """ Alias for more readable command construction """
    # With no default the field is required (KeyError if absent).
    if default is None:
        return str(kwargs[field])
    return str(kwargs.get(field, default))
def pack(field: str, kwargs: Dict[str, Any],
         default: Optional[Any] = None, sep: str = ',') -> str:
    """ Util for joining multiple fields with commas """
    value = kwargs[field] if default is None else kwargs.get(field, default)

    # Strings are iterable but must not be character-joined.
    if isinstance(value, str):
        return value
    if isinstance(value, collections.abc.Iterable):
        return sep.join(str(part) for part in value)
    return str(value)
def pack_command(command: str, **kwargs: Any) -> str:
    """ Pack a command to send to an IRC server """
    if not command:
        raise ValueError("Must provide a command")
    if not isinstance(command, str):
        raise ValueError("Command must be a string")
    command = command.upper()

    # ========================================================================
    # For each command, provide:
    # 1. a link to the definition in rfc2812
    # 2. the normalized grammar, which may not equate to the rfc grammar
    #    the normalized grammar will use the keys expected in kwargs,
    #    which usually do NOT line up with rfc2812.  They may also make
    #    optional fields which are required in rfc2812, by providing
    #    the most common or reasonable defaults.
    # 3. exhaustive examples, preferring normalized form of
    #    the rfc2812 examples
    # ========================================================================

    # ========================================================================
    # Normalized grammar:
    #  : should not be provided; it denotes the beginning of the last
    #    field, which may contain spaces
    #  [] indicates an optional field
    #  <> denote the key that the field will be filled with
    #  because fields are filled from a dict, required fields may follow
    #    optional fields - see USER command, where mode is optional
    #    (and defaults to 0)
    #  "" indicates a literal value that is inserted if present
    # ========================================================================

    # PASS
    # https://tools.ietf.org/html/rfc2812#section-3.1.1
    # PASS <password>
    # ----------
    # PASS secretpasswordhere
    if command == "PASS":
        return "PASS " + f("password", kwargs)

    # NICK
    # https://tools.ietf.org/html/rfc2812#section-3.1.2
    # NICK <nick>
    # ----------
    # NICK Wiz
    elif command == "NICK":
        return "NICK " + f("nick", kwargs)

    # USER
    # https://tools.ietf.org/html/rfc2812#section-3.1.3
    # USER <user> [<mode>] :<realname>
    # ----------
    # USER guest 8 :Ronnie Reagan
    # USER guest :Ronnie Reagan
    elif command == "USER":
        return "USER {} {} * :{}".format(
            f("user", kwargs), f("mode", kwargs, 0), f("realname", kwargs))

    # OPER
    # https://tools.ietf.org/html/rfc2812#section-3.1.4
    # OPER <user> <password>
    # ----------
    # OPER AzureDiamond hunter2
    elif command == "OPER":
        return "OPER {} {}".format(f("user", kwargs), f("password", kwargs))

    # USERMODE (renamed from MODE)
    # https://tools.ietf.org/html/rfc2812#section-3.1.5
    # MODE <nick> [<modes>]
    # ----------
    # MODE WiZ -w
    # MODE Angel +i
    # MODE
    elif command == "USERMODE":
        return "MODE {} {}".format(f("nick", kwargs),
                                   f("modes", kwargs, ''))

    # SERVICE
    # https://tools.ietf.org/html/rfc2812#section-3.1.6
    # SERVICE <nick> <distribution> <type> :<info>
    # ----------
    # SERVICE dict *.fr 0 :French
    elif command == "SERVICE":
        return "SERVICE {} * {} {} 0 :{}".format(
            f("nick", kwargs), f("distribution", kwargs),
            f("type", kwargs), f("info", kwargs))

    # QUIT
    # https://tools.ietf.org/html/rfc2812#section-3.1.7
    # QUIT :[<message>]
    # ----------
    # QUIT :Gone to lunch
    # QUIT
    elif command == "QUIT":
        if "message" in kwargs:
            return "QUIT :" + f("message", kwargs)
        return "QUIT"

    # SQUIT
    # https://tools.ietf.org/html/rfc2812#section-3.1.8
    # SQUIT <server> [<message>]
    # ----------
    # SQUIT tolsun.oulu.fi :Bad Link
    # SQUIT tolsun.oulu.fi
    elif command == "SQUIT":
        base = "SQUIT " + f("server", kwargs)
        if "message" in kwargs:
            return base + " :" + f("message", kwargs)
        return base

    # JOIN
    # https://tools.ietf.org/html/rfc2812#section-3.2.1
    # JOIN <channel> [<key>]
    # ----------
    # JOIN #foo fookey
    # JOIN #foo
    # JOIN 0
    elif command == "JOIN":
        return "JOIN {} {}".format(pack("channel", kwargs),
                                   pack("key", kwargs, ''))

    # PART
    # https://tools.ietf.org/html/rfc2812#section-3.2.2
    # PART <channel> :[<message>]
    # ----------
    # PART #foo :I lost
    # PART #foo
    elif command == "PART":
        base = "PART " + pack("channel", kwargs)
        if "message" in kwargs:
            return base + " :" + f("message", kwargs)
        return base

    # CHANNELMODE (renamed from MODE)
    # https://tools.ietf.org/html/rfc2812#section-3.2.3
    # MODE <channel> <modes> [<params>]
    # ----------
    # MODE #Finnish +imI *!*@*.fi
    # MODE #en-ops +v WiZ
    # MODE #Fins -s
    elif command == "CHANNELMODE":
        return "MODE {} {} {}".format(f("channel", kwargs),
                                      f("modes", kwargs),
                                      f("params", kwargs, ''))

    # TOPIC
    # https://tools.ietf.org/html/rfc2812#section-3.2.4
    # TOPIC <channel> :[<message>]
    # ----------
    # TOPIC #test :New topic
    # TOPIC #test :
    # TOPIC #test
    elif command == "TOPIC":
        base = "TOPIC " + f("channel", kwargs)
        if "message" in kwargs:
            return base + " :" + f("message", kwargs)
        return base

    # NAMES
    # https://tools.ietf.org/html/rfc2812#section-3.2.5
    # NAMES [<channel>] [<target>]
    # ----------
    # NAMES #twilight_zone remote.*.edu
    # NAMES #twilight_zone
    # NAMES
    elif command == "NAMES":
        if "channel" in kwargs:
            return "NAMES {} {}".format(pack("channel", kwargs),
                                        f("target", kwargs, ''))
        return "NAMES"

    # LIST
    # https://tools.ietf.org/html/rfc2812#section-3.2.6
    # LIST [<channel>] [<target>]
    # ----------
    # LIST #twilight_zone remote.*.edu
    # LIST #twilight_zone
    # LIST
    elif command == "LIST":
        if "channel" in kwargs:
            return "LIST {} {}".format(pack("channel", kwargs),
                                       f("target", kwargs, ''))
        return "LIST"

    # INVITE
    # https://tools.ietf.org/html/rfc2812#section-3.2.7
    # INVITE <nick> <channel>
    # ----------
    # INVITE Wiz #Twilight_Zone
    elif command == "INVITE":
        return "INVITE {} {}".format(f("nick", kwargs), f("channel", kwargs))

    # KICK
    # https://tools.ietf.org/html/rfc2812#section-3.2.8
    # KICK <channel> <nick> :[<message>]
    # ----------
    # KICK #Finnish WiZ :Speaking English
    # KICK #Finnish WiZ,Wiz-Bot :Both speaking English
    # KICK #Finnish,#English WiZ,ZiW :Speaking wrong language
    elif command == "KICK":
        base = "KICK {} {}".format(pack("channel", kwargs),
                                   pack("nick", kwargs))
        if "message" in kwargs:
            return base + " :" + pack("message", kwargs)
        return base

    # PRIVMSG
    # https://tools.ietf.org/html/rfc2812#section-3.3.1
    # PRIVMSG <target> :<message>
    # ----------
    # PRIVMSG Angel :yes I'm receiving it !
    # PRIVMSG $*.fi :Server tolsun.oulu.fi rebooting.
    # PRIVMSG #Finnish :This message is in english
    elif command == "PRIVMSG":
        return "PRIVMSG {} :{}".format(f("target", kwargs),
                                       f("message", kwargs))

    # NOTICE
    # https://tools.ietf.org/html/rfc2812#section-3.3.2
    # NOTICE <target> :<message>
    # ----------
    # NOTICE Angel :yes I'm receiving it !
    # NOTICE $*.fi :Server tolsun.oulu.fi rebooting.
    # NOTICE #Finnish :This message is in english
    elif command == "NOTICE":
        return "NOTICE {} :{}".format(f("target", kwargs),
                                      f("message", kwargs))

    # MOTD
    # https://tools.ietf.org/html/rfc2812#section-3.4.1
    # MOTD [<target>]
    # ----------
    # MOTD remote.*.edu
    # MOTD
    elif command == "MOTD":
        return "MOTD " + f("target", kwargs, '')

    # LUSERS
    # https://tools.ietf.org/html/rfc2812#section-3.4.2
    # LUSERS [<mask>] [<target>]
    # ----------
    # LUSERS *.edu remote.*.edu
    # LUSERS *.edu
    # LUSERS
    elif command == "LUSERS":
        if "mask" in kwargs:
            return "LUSERS {} {}".format(f("mask", kwargs),
                                         f("target", kwargs, ''))
        return "LUSERS"

    # VERSION
    # https://tools.ietf.org/html/rfc2812#section-3.4.3
    # VERSION [<target>]
    # ----------
    # VERSION remote.*.edu
    # VERSION
    elif command == "VERSION":
        return "VERSION " + f("target", kwargs, '')

    # STATS
    # https://tools.ietf.org/html/rfc2812#section-3.4.4
    # STATS [<query>] [<target>]
    # ----------
    # STATS m remote.*.edu
    # STATS m
    # STATS
    elif command == "STATS":
        if "query" in kwargs:
            return "STATS {} {}".format(f("query", kwargs),
                                        f("target", kwargs, ''))
        return "STATS"

    # LINKS
    # https://tools.ietf.org/html/rfc2812#section-3.4.5
    # LINKS [<remote>] [<mask>]
    # ----------
    # LINKS *.edu *.bu.edu
    # LINKS *.au
    # LINKS
    elif command == "LINKS":
        if "remote" in kwargs:
            return "LINKS {} {}".format(f("remote", kwargs),
                                        f("mask", kwargs))
        elif "mask" in kwargs:
            return "LINKS " + f("mask", kwargs)
        return "LINKS"

    # TIME
    # https://tools.ietf.org/html/rfc2812#section-3.4.6
    # TIME [<target>]
    # ----------
    # TIME remote.*.edu
    # TIME
    elif command == "TIME":
        return "TIME " + f("target", kwargs, '')

    # CONNECT
    # https://tools.ietf.org/html/rfc2812#section-3.4.7
    # CONNECT <target> <port> [<remote>]
    # ----------
    # CONNECT tolsun.oulu.fi 6667 *.edu
    # CONNECT tolsun.oulu.fi 6667
    elif command == "CONNECT":
        return "CONNECT {} {} {}".format(f("target", kwargs),
                                         f("port", kwargs),
                                         f("remote", kwargs, ''))

    # TRACE
    # https://tools.ietf.org/html/rfc2812#section-3.4.8
    # TRACE [<target>]
    # ----------
    # TRACE
    elif command == "TRACE":
        return "TRACE " + f("target", kwargs, '')

    # ADMIN
    # https://tools.ietf.org/html/rfc2812#section-3.4.9
    # ADMIN [<target>]
    # ----------
    # ADMIN
    elif command == "ADMIN":
        return "ADMIN " + f("target", kwargs, '')

    # INFO
    # https://tools.ietf.org/html/rfc2812#section-3.4.10
    # INFO [<target>]
    # ----------
    # INFO
    elif command == "INFO":
        return "INFO " + f("target", kwargs, '')

    # SERVLIST
    # https://tools.ietf.org/html/rfc2812#section-3.5.1
    # SERVLIST [<mask>] [<type>]
    # ----------
    # SERVLIST *SERV 3
    # SERVLIST *SERV
    # SERVLIST
    elif command == "SERVLIST":
        return "SERVLIST {} {}".format(f("mask", kwargs, ''),
                                       f("type", kwargs, ''))

    # SQUERY
    # https://tools.ietf.org/html/rfc2812#section-3.5.2
    # SQUERY <target> :<message>
    # ----------
    # SQUERY irchelp :HELP privmsg
    elif command == "SQUERY":
        return "SQUERY {} :{}".format(f("target", kwargs),
                                      f("message", kwargs))

    # WHO
    # https://tools.ietf.org/html/rfc2812#section-3.6.1
    # WHO [<mask>] ["o"]
    # ----------
    # WHO jto* o
    # WHO *.fi
    # WHO
    elif command == "WHO":
        return "WHO {} {}".format(f("mask", kwargs, ''), b("o", kwargs))

    # WHOIS
    # https://tools.ietf.org/html/rfc2812#section-3.6.2
    # WHOIS <mask> [<target>]
    # ----------
    # WHOIS jto* o remote.*.edu
    # WHOIS jto* o
    # WHOIS *.fi
    elif command == "WHOIS":
        return "WHOIS {} {}".format(pack("mask", kwargs),
                                    f("target", kwargs, ''))

    # WHOWAS
    # https://tools.ietf.org/html/rfc2812#section-3.6.3
    # WHOWAS <nick> [<count>] [<target>]
    # ----------
    # WHOWAS Wiz 9 remote.*.edu
    # WHOWAS Wiz 9
    # WHOWAS Mermaid
    elif command == "WHOWAS":
        if "count" in kwargs:
            return "WHOWAS {} {} {}".format(pack("nick", kwargs),
                                            f("count", kwargs),
                                            f("target", kwargs, ''))
        return "WHOWAS " + pack("nick", kwargs)

    # KILL
    # https://tools.ietf.org/html/rfc2812#section-3.7.1
    # KILL <nick> :<message>
    # ----------
    # KILL WiZ :Spamming joins
    elif command == "KILL":
        return "KILL {} :{}".format(f("nick", kwargs), f("message", kwargs))

    # PING
    # https://tools.ietf.org/html/rfc2812#section-3.7.2
    # PING :[<message>]
    # ----------
    # PING :I'm still here
    # PING
    elif command == "PING":
        if "message" in kwargs:
            return "PING :{}".format(f("message", kwargs))
        else:
            return "PING"

    # PONG
    # https://tools.ietf.org/html/rfc2812#section-3.7.3
    # PONG :[<message>]
    # ----------
    # PONG :I'm still here
    # PONG
    elif command == "PONG":
        if "message" in kwargs:
            return "PONG :{}".format(f("message", kwargs))
        else:
            return "PONG"

    # AWAY
    # https://tools.ietf.org/html/rfc2812#section-4.1
    # AWAY :[<message>]
    # ----------
    # AWAY :Gone to lunch.
    # AWAY
    elif command == "AWAY":
        if "message" in kwargs:
            return "AWAY :" + f("message", kwargs)
        return "AWAY"

    # REHASH
    # https://tools.ietf.org/html/rfc2812#section-4.2
    # REHASH
    # ----------
    # REHASH
    elif command == "REHASH":
        return "REHASH"

    # DIE
    # https://tools.ietf.org/html/rfc2812#section-4.3
    # DIE
    # ----------
    # DIE
    elif command == "DIE":
        return "DIE"

    # RESTART
    # https://tools.ietf.org/html/rfc2812#section-4.4
    # RESTART
    # ----------
    # RESTART
    elif command == "RESTART":
        return "RESTART"

    # SUMMON
    # https://tools.ietf.org/html/rfc2812#section-4.5
    # SUMMON <nick> [<target>] [<channel>]
    # ----------
    # SUMMON Wiz remote.*.edu #Finnish
    # SUMMON Wiz remote.*.edu
    # SUMMON Wiz
    elif command == "SUMMON":
        if "target" in kwargs:
            return "SUMMON {} {} {}".format(f("nick", kwargs),
                                            f("target", kwargs),
                                            f("channel", kwargs, ''))
        return "SUMMON " + f("nick", kwargs)

    # USERS
    # https://tools.ietf.org/html/rfc2812#section-4.6
    # USERS [<target>]
    # ----------
    # USERS remote.*.edu
    # USERS
    elif command == "USERS":
        return "USERS " + f("target", kwargs, '')

    # WALLOPS
    # https://tools.ietf.org/html/rfc2812#section-4.7
    # WALLOPS :<message>
    # ----------
    # WALLOPS :Maintenance in 5 minutes
    elif command == "WALLOPS":
        return "WALLOPS :" + f("message", kwargs)

    # USERHOST
    # https://tools.ietf.org/html/rfc2812#section-4.8
    # USERHOST <nick>
    # ----------
    # USERHOST Wiz Michael syrk
    # USERHOST syrk
    elif command == "USERHOST":
        return "USERHOST " + pack("nick", kwargs, sep=" ")

    # ISON
    # https://tools.ietf.org/html/rfc2812#section-4.9
    # ISON <nick>
    # ----------
    # ISON Wiz Michael syrk
    # ISON syrk
    elif command == "ISON":
        return "ISON " + pack("nick", kwargs, sep=" ")

    else:
        raise ValueError("Unknown command '{}'".format(command))
async def connect(self) -> None:
    """Establish a connection to the configured server.

    Any previously open protocol is closed before the new one is
    installed, then a ``client_connect`` event is triggered.
    """
    transport_and_protocol = await self.loop.create_connection(
        lambda: Protocol(client=self),
        host=self.host, port=self.port, ssl=self.ssl
    )  # type: Tuple[Any, Any]
    new_protocol = transport_and_protocol[1]

    old_protocol = self.protocol
    if old_protocol:
        old_protocol.close()
    self.protocol = new_protocol

    # TODO: Delete the following code line. It is currently kept in order
    # to not break the current existing codebase. Removing it requires a
    # heavy change in the test codebase.
    new_protocol.client = self

    self.trigger("client_connect")
def trigger(self, event: str, **kwargs: Any) -> None:
    """Schedule every registered handler for *event* to run asynchronously."""
    key = event.upper()
    for handler in self._event_handlers[key]:
        self.loop.create_task(handler(**kwargs))
    # Wake up anyone blocked on `await client.wait(event)` while making
    # sure the *next* wait on the same event does not return immediately:
    # set the async event, then clear it right away.
    waiter = self._events[key]
    waiter.set()
    waiter.clear()
def on(self, event: str, func: Optional[Callable] = None) -> Callable:
    """
    Decorate a function to be invoked when the given event occurs.

    The function may be a coroutine.  Your function should accept
    **kwargs in case an event is triggered with unexpected kwargs.

    Example
    -------
    import asyncio
    import bottom

    client = bottom.Client(...)
    @client.on("test")
    async def func(one, two, **kwargs):
        print(one)
        print(two)
        print(kwargs)

    events.trigger("test", **{"one": 1, "two": 2, "extra": "foo"})
    loop = asyncio.get_event_loop()
    # Run all queued events
    loop.stop()
    loop.run_forever()
    """
    if func is None:
        # Used as a decorator factory: @client.on("event")
        return functools.partial(self.on, event)  # type: ignore
    wrapped = func
    if not asyncio.iscoroutinefunction(wrapped):
        # asyncio.coroutine was deprecated in 3.8 and removed in 3.11;
        # wrap plain callables in a real coroutine function instead.
        @functools.wraps(func)
        async def wrapped(*args: Any, **kwargs: Any) -> Any:
            return func(*args, **kwargs)
    self._event_handlers[event.upper()].append(wrapped)
    # Always return original so the decorated name stays callable as-is
    return func
def send(self, command: str, **kwargs: Any) -> None:
    """
    Send a message to the server.

    .. code-block:: python

        client.send("nick", nick="weatherbot")
        client.send("privmsg", target="#python", message="Hello, World!")
    """
    # Serialize the command and its arguments to wire format, trim
    # surrounding whitespace, and hand the result to the raw sender.
    self.send_raw(pack_command(command, **kwargs).strip())
client callback entrance def _handle(self, nick, target, message, **kwargs): """ client callback entrance """ for regex, (func, pattern) in self.routes.items(): match = regex.match(message) if match: self.client.loop.create_task( func(nick, target, message, match, **kwargs))
def main():
    """parse commandline arguments and print result"""

    # mapping of user-visible format codes to owserver format flag words
    fcodes = collections.OrderedDict((
        ('f.i', protocol.FLG_FORMAT_FDI),
        ('fi', protocol.FLG_FORMAT_FI),
        ('f.i.c', protocol.FLG_FORMAT_FDIDC),
        ('f.ic', protocol.FLG_FORMAT_FDIC),
        ('fi.c', protocol.FLG_FORMAT_FIDC),
        ('fic', protocol.FLG_FORMAT_FIC), ))

    def make_parser():
        # command line parsing
        parser = argparse.ArgumentParser()

        # positional args
        parser.add_argument('uri', metavar='URI', nargs='?', default='/',
                            help='[owserver:]//hostname:port/path')

        # optional args for temperature scale (mutually exclusive)
        parser.set_defaults(t_flags=protocol.FLG_TEMP_C)
        tempg = parser.add_mutually_exclusive_group()
        tempg.add_argument('-C', '--Celsius', const=protocol.FLG_TEMP_C,
                           help='Celsius(default) temperature scale',
                           dest='t_flags', action='store_const', )
        tempg.add_argument('-F', '--Fahrenheit', const=protocol.FLG_TEMP_F,
                           help='Fahrenheit temperature scale',
                           dest='t_flags', action='store_const', )
        tempg.add_argument('-K', '--Kelvin', const=protocol.FLG_TEMP_K,
                           help='Kelvin temperature scale',
                           dest='t_flags', action='store_const', )
        tempg.add_argument('-R', '--Rankine', const=protocol.FLG_TEMP_R,
                           help='Rankine temperature scale',
                           dest='t_flags', action='store_const', )

        # optional arg for address format
        parser.set_defaults(format='f.i')
        parser.add_argument('-f', '--format', choices=fcodes,
                            help='format for 1-wire unique serial IDs display')

        # optional arg for output format (mutually exclusive)
        tempg = parser.add_mutually_exclusive_group()
        tempg.add_argument('--hex', action='store_true',
                           help='write data in hex format')
        tempg.add_argument('-b', '--binary', action='store_true',
                           help='output binary data')

        # debug output
        parser.add_argument('-d', '--debug', action='store_true',
                           help='debug output')

        return parser

    def print_data(data):
        # format and print data
        if args.binary:
            # raw bytes go straight to stdout (py2: plain write,
            # py3: the underlying binary buffer)
            if sys.version_info < (3, ):
                sys.stdout.write(data)
            else:
                sys.stdout.buffer.write(data)
        else:
            if args.hex:
                data = hexlify(data)
            else:
                try:
                    data = data.decode('ascii')
                except UnicodeDecodeError:
                    # non-ASCII payload: fall back to repr so it stays printable
                    data = repr(data)
            print(data)

    #
    # main program starts here
    #

    #
    # parse command line arguments
    #
    parser = make_parser()
    args = parser.parse_args()

    #
    # parse args.uri and substitute defaults
    #
    urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False)
    assert urlc.fragment == ''
    if urlc.scheme != 'owserver':
        parser.error("Invalid URI scheme '{0}:'".format(urlc.scheme))
    if urlc.query:
        parser.error("Invalid URI, query component '?{0}' not allowed"
                     .format(urlc.query))
    try:
        host = urlc.hostname or 'localhost'
        # accessing urlc.port raises ValueError for a malformed port
        port = urlc.port or 4304
    except ValueError as error:
        parser.error("Invalid URI: invalid net location '//{0}/'"
                     .format(urlc.netloc))

    #
    # create owserver proxy object
    #
    try:
        owproxy = protocol.proxy(
            host, port,
            flags=args.t_flags | fcodes[args.format],
            verbose=args.debug, )
    except protocol.ConnError as error:
        print("Unable to open connection to '{0}:{1}'\nSystem error: {2}"
              .format(host, port, error), file=sys.stderr)
        sys.exit(1)
    except protocol.ProtocolError as error:
        print("'{0}:{1}' not an owserver?\nProtocol error: {2}"
              .format(host, port, error), file=sys.stderr)
        sys.exit(1)

    #
    # query owserver and print results
    #
    try:
        if urlc.path.endswith('/'):
            # trailing slash: list the directory contents
            for path in owproxy.dir(urlc.path, bus=True):
                print(path)
        else:
            # otherwise read a single node and print its value
            data = owproxy.read(urlc.path)
            print_data(data)
    except protocol.OwnetError as error:
        # NOTE(review): only {2} is used by this format string; host and
        # port are passed but ignored — confirm whether they were meant
        # to appear in the message as in the other branches.
        print("Remote server error: {2}"
              .format(host, port, error), file=sys.stderr)
        sys.exit(1)
    except protocol.ProtocolError as error:
        print("'{0}:{1}' buggy?\nProtocol error: {2}"
              .format(host, port, error), file=sys.stderr)
        sys.exit(1)
def main():
    """parse commandline arguments and print result"""

    #
    # setup command line parsing a la argpase
    #
    parser = argparse.ArgumentParser()

    # positional args
    parser.add_argument('uri', metavar='URI', nargs='?', default='/',
                        help='[owserver:]//hostname:port/path')

    #
    # parse command line args
    #
    args = parser.parse_args()

    #
    # parse args.uri and substitute defaults
    #
    urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False)
    assert urlc.fragment == ''
    if urlc.scheme != 'owserver':
        parser.error("Invalid URI scheme '{0}:'".format(urlc.scheme))
    if urlc.query:
        parser.error("Invalid URI, query component '?{0}' not allowed"
                     .format(urlc.query))
    try:
        host = urlc.hostname or 'localhost'
        # accessing urlc.port raises ValueError for a malformed port
        port = urlc.port or 4304
    except ValueError as error:
        parser.error("Invalid URI: invalid net location '//{0}/'"
                     .format(urlc.netloc))

    #
    # create owserver proxy object (persistent: the same connection is
    # reused for the whole stress run)
    #
    try:
        owproxy = protocol.proxy(host, port, persistent=True)
    except protocol.ConnError as error:
        print("Unable to open connection to '{0}:{1}'\n{2}"
              .format(host, port, error), file=sys.stderr)
        sys.exit(1)
    except protocol.ProtocolError as error:
        print("Protocol error, '{0}:{1}' not an owserver?\n{2}"
              .format(host, port, error), file=sys.stderr)
        sys.exit(1)

    # hand the ready proxy over to the stress-test driver
    stress(owproxy, urlc.path)
def build_url(self, path, params=None):
    '''
    Constructs the url for a cheddar API resource
    '''
    # Cheddar addresses resources as endpoint/path/productCode/<code>,
    # with any extra parameters appended as /key/value path segments.
    pieces = [u'%s/%s/productCode/%s' % (self.endpoint, path,
                                         self.product_code)]
    if params:
        pieces.extend(u'%s/%s' % (key, value)
                      for key, value in params.items())
    return u'/'.join(pieces)
def make_request(self, path, params=None, data=None, method=None):
    '''
    Makes a request to the cheddar api using the authentication and
    configuration settings available.

    path -- API resource path, appended to the product endpoint.
    params -- optional mapping encoded into the URL as /key/value pairs.
    data -- optional mapping sent as a form-encoded body; forces POST.
    method -- explicit HTTP method, defaults to GET (POST when data given).

    Raises a CheddarError subclass when the response status is not
    200 or 302; otherwise returns the response with `.content` attached.
    '''
    # Setup values
    url = self.build_url(path, params)
    client_log.debug('Requesting: %s' % url)
    method = method or 'GET'
    body = None
    headers = {}
    if data:
        method = 'POST'
        body = urlencode(data)
        headers = {
            'content-type':
                'application/x-www-form-urlencoded; charset=UTF-8',
        }
    client_log.debug('Request Method: %s' % method)
    client_log.debug('Request Body(Data): %s' % data)
    client_log.debug('Request Body(Raw): %s' % body)

    # Setup http client
    h = httplib2.Http(cache=self.cache, timeout=self.timeout)

    # Skip the normal http client behavior and send auth headers
    # immediately to save an http request.
    # NOTE: b64encode operates on bytes, so encode the credentials first
    # and decode the result back to text.  Passing a str directly to
    # standard_b64encode raises TypeError on Python 3 (and would render
    # as "Basic b'...'" if naively interpolated).
    credentials = '%s:%s' % (self.username, self.password)
    auth = base64.standard_b64encode(
        credentials.encode('utf-8')).decode('ascii').strip()
    headers['Authorization'] = "Basic %s" % auth

    # Make request
    response, content = h.request(url, method, body=body, headers=headers)
    status = response.status
    client_log.debug('Response Status: %d' % status)
    client_log.debug('Response Content: %s' % content)
    if status != 200 and status != 302:
        # Map the HTTP status onto the most specific exception we have.
        exception_class = CheddarError
        if status == 401:
            exception_class = AccessDenied
        elif status == 400:
            exception_class = BadRequest
        elif status == 404:
            exception_class = NotFound
        elif status == 412:
            exception_class = PreconditionFailed
        elif status == 500:
            exception_class = CheddarFailure
        elif status == 502:
            exception_class = NaughtyGateway
        elif status == 422:
            exception_class = UnprocessableEntity

        raise exception_class(response, content)

    response.content = content
    return response
def main():
    """parse commandline arguments and print result"""

    #
    # setup command line parsing a la argpase
    #
    parser = argparse.ArgumentParser()

    # positional args
    parser.add_argument('uri', metavar='URI', nargs='?', default='/',
                        help='[owserver:]//server:port/entity')

    # optional args for temperature scale (mutually exclusive)
    parser.set_defaults(t_flags=protocol.FLG_TEMP_C)
    tempg = parser.add_mutually_exclusive_group()
    tempg.add_argument('-C', '--Celsius', const=protocol.FLG_TEMP_C,
                       help='Celsius(default) temperature scale',
                       dest='t_flags', action='store_const', )
    tempg.add_argument('-F', '--Fahrenheit', const=protocol.FLG_TEMP_F,
                       help='Fahrenheit temperature scale',
                       dest='t_flags', action='store_const', )
    tempg.add_argument('-K', '--Kelvin', const=protocol.FLG_TEMP_K,
                       help='Kelvin temperature scale',
                       dest='t_flags', action='store_const', )
    tempg.add_argument('-R', '--Rankine', const=protocol.FLG_TEMP_R,
                       help='Rankine temperature scale',
                       dest='t_flags', action='store_const', )

    # optional arg for address format: maps the user-visible code to the
    # corresponding owserver format flag word
    fcodes = collections.OrderedDict((
        ('f.i', protocol.FLG_FORMAT_FDI),
        ('fi', protocol.FLG_FORMAT_FI),
        ('f.i.c', protocol.FLG_FORMAT_FDIDC),
        ('f.ic', protocol.FLG_FORMAT_FDIC),
        ('fi.c', protocol.FLG_FORMAT_FIDC),
        ('fic', protocol.FLG_FORMAT_FIC), ))
    parser.set_defaults(format='f.i')
    parser.add_argument('-f', '--format', choices=fcodes,
                        help='format for 1-wire unique serial IDs display')

    parser.add_argument('--nosys', '--only-sensors', action='store_false',
                        dest='bus',
                        help='do not descend system directories')

    #
    # parse command line args
    #
    args = parser.parse_args()

    #
    # parse args.uri and substitute defaults
    #
    urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False)
    if urlc.scheme != 'owserver':
        parser.error("Invalid URI scheme '{}:'".format(urlc.scheme))
    assert not urlc.fragment
    if urlc.query:
        parser.error(
            "Invalid URI '{}', no query component allowed".format(args.uri))
    host = urlc.hostname or 'localhost'
    port = urlc.port or 4304

    #
    # create owserver proxy object (persistent: one connection is reused
    # for the whole traversal)
    #
    try:
        proxy = protocol.proxy(
            host, port, flags=args.t_flags | fcodes[args.format],
            persistent=True)
    except (protocol.ConnError, protocol.ProtocolError) as error:
        parser.exit(status=1, message=str(error) + '\n')

    def walk(path):
        # recursively visit 'path': read leaf nodes, descend directories
        # (directories are recognized by their trailing slash)
        try:
            if not path.endswith('/'):
                val = proxy.read(path)
                print("{:40} {!r}".format(path, val))
            else:
                for entity in proxy.dir(path, bus=args.bus):
                    walk(entity)
        except protocol.OwnetError as error:
            print('Unable to walk {}: server says {}'.format(path, error),
                  file=sys.stderr)
        except protocol.ConnError as error:
            print('Unable to walk {}: {}'.format(path, error),
                  file=sys.stderr)

    with proxy:
        walk(urlc.path)
def proxy(host='localhost', port=4304, flags=0, persistent=False,
          verbose=False, ):
    """factory function that returns a proxy object for an owserver at
    host, port.

    The host/port pair is resolved via getaddrinfo and the first
    address on which a live owserver answers a ping is used.  'flags'
    are OR-ed into every message sent; 'persistent' selects a
    connection-reusing proxy; 'verbose' enables wire-level debug
    printing.  Raises ConnError when resolution fails or no listening
    owserver is found.
    """

    # resolve host name/port
    try:
        gai = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
                                 socket.IPPROTO_TCP)
    except socket.gaierror as err:
        raise ConnError(*err.args)

    # gai is a (non empty) list of tuples, search for the first working one
    assert gai
    for (family, _type, _proto, _, sockaddr) in gai:
        assert _type is socket.SOCK_STREAM and _proto is socket.IPPROTO_TCP
        owp = _Proxy(family, sockaddr, flags, verbose)
        try:
            # check if there is an owserver listening
            owp.ping()
        except ConnError as err:
            # no connection, go over to next sockaddr
            lasterr = err.args
            continue
        else:
            # ok, live owserver found, stop searching
            break
    else:
        # no server listening on (family, sockaddr) found:
        # 'lasterr' is always bound here, since the loop body ran at
        # least once (gai is non-empty, asserted above)
        raise ConnError(*lasterr)

    # init errno to errmessage mapping
    # FIXME: should this be only optional?
    owp._init_errcodes()

    if persistent:
        owp = clone(owp, persistent=True)

    # here we should have all connections closed
    assert not isinstance(owp, _PersistentProxy) or owp.conn is None

    return owp
def clone(proxy, persistent=True):
    """factory function for cloning a proxy object"""
    if not isinstance(proxy, _Proxy):
        raise TypeError('argument is not a Proxy object')
    # The connection mode is carried by the concrete class, not by the
    # flag word, so FLG_PERSISTENCE is always stripped from the copy.
    proxy_class = _PersistentProxy if persistent else _Proxy
    return proxy_class(proxy._family, proxy._sockaddr,
                       proxy.flags & ~FLG_PERSISTENCE,
                       proxy.verbose, proxy.errmess)
def shutdown(self):
    """shutdown connection

    An ENOTCONN error from the socket means the remote peer already
    closed the connection and is silently ignored; any other IOError
    is re-raised.
    """
    if self.verbose:
        print(self.socket.getsockname(), 'xx', self.peername)
    try:
        self.socket.shutdown(socket.SHUT_RDWR)
    except IOError as err:
        # Do not use `assert` for this check (it is stripped under -O,
        # which would silently swallow *every* IOError) nor `is` on an
        # int (identity of equal ints is not guaranteed by the language).
        if err.errno != _ENOTCONN:
            raise
        # remote peer has already closed the connection: ignore
def req(self, msgtype, payload, flags, size=0, offset=0, timeout=0):
    """send message to server and return response

    Returns a (retcode, flags, data) triple.  While waiting for the
    real answer the server may emit keep-alive PING messages (signalled
    by a negative payload length); these are consumed until either a
    valid reply arrives or 'timeout' seconds (when non-zero) have
    elapsed, in which case OwnetTimeout is raised.
    """
    if timeout < 0:
        raise ValueError("timeout cannot be negative!")
    tohead = _ToServerHeader(payload=len(payload), type=msgtype,
                             flags=flags, size=size, offset=offset)

    tstartcom = monotonic()  # set timer when communication begins
    self._send_msg(tohead, payload)

    while True:
        fromhead, data = self._read_msg()

        if fromhead.payload >= 0:
            # we received a valid answer and return the result
            return fromhead.ret, fromhead.flags, data

        # a NOP is answered immediately, it can never be PINGed
        assert msgtype != MSG_NOP

        # we did not exit the loop because payload is negative
        # Server said PING to keep connection alive during lenghty op

        # check if timeout has expired
        if timeout:
            tcom = monotonic() - tstartcom
            if tcom > timeout:
                raise OwnetTimeout(tcom, timeout)
def _send_msg(self, header, payload):
    """send message to server

    Raises ConnError when the underlying socket operation fails.
    """
    if self.verbose:
        print('->', repr(header))
        print('..', repr(payload))
    assert header.payload == len(payload)
    try:
        # sendall retries partial sends internally until the whole
        # message is on the wire (or an error is raised), so the old
        # manual send() + ShortWrite length check is no longer needed —
        # this resolves the long-standing FIXME about retrying partial
        # socket.send calls.
        self.socket.sendall(header + payload)
    except IOError as err:
        raise ConnError(*err.args)
def _read_msg(self):
    """read message from server

    Returns a (header, payload) pair; the payload is truncated to
    header.size bytes.
    """
    #
    # NOTE:
    # '_recv_socket(nbytes)' was implemented as
    # 'socket.recv(nbytes, socket.MSG_WAITALL)'
    # but socket.MSG_WAITALL proved not reliable
    #

    def _recv_socket(nbytes):
        """read nbytes bytes from self.socket"""
        #
        # code below is written under the assumption that
        # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop
        # is entered rarerly
        #
        try:
            buf = self.socket.recv(nbytes)
        except IOError as err:
            raise ConnError(*err.args)

        if not buf:
            # EOF before a single byte was read
            raise ShortRead(0, nbytes)

        while len(buf) < nbytes:
            try:
                tmp = self.socket.recv(nbytes - len(buf))
            except IOError as err:
                raise ConnError(*err.args)

            if not tmp:
                # EOF in the middle of a message
                if self.verbose:
                    print('ee', repr(buf))
                raise ShortRead(len(buf), nbytes)

            buf += tmp

        assert len(buf) == nbytes, (buf, len(buf), nbytes)
        return buf

    data = _recv_socket(_FromServerHeader.header_size)
    header = _FromServerHeader(data)
    if self.verbose:
        print('<-', repr(header))

    # error conditions
    if header.version != 0:
        raise MalformedHeader('bad version', header)
    if header.payload > MAX_PAYLOAD:
        raise MalformedHeader('huge payload, unwilling to read', header)

    if header.payload > 0:
        payload = _recv_socket(header.payload)
        if self.verbose:
            print('..', repr(payload))
        # the server may send a padded payload; only the first
        # header.size bytes carry meaningful data
        assert header.size <= header.payload
        payload = payload[:header.size]
    else:
        payload = bytes()
    return header, payload
def sendmess(self, msgtype, payload, flags=0, size=0, offset=0, timeout=0):
    """
    retcode, data = sendmess(msgtype, payload)
    send generic message and returns retcode, data
    """
    # Merge in the proxy-wide flags; a non-persistent proxy must never
    # request a persistent connection.
    effective_flags = flags | self.flags
    assert not (effective_flags & FLG_PERSISTENCE)
    # One throwaway connection per message.
    with self._new_connection() as conn:
        retcode, _rflags, data = conn.req(
            msgtype, payload, effective_flags, size, offset, timeout)
    return retcode, data
def ping(self):
    """sends a NOP packet and waits response; returns None"""
    retcode, data = self.sendmess(MSG_NOP, bytes())
    # A well-formed pong carries no payload and a non-positive retcode.
    if retcode > 0 or data:
        raise ProtocolError('invalid reply to ping message')
    if retcode < 0:
        raise OwnetError(-retcode, self.errmess[-retcode])
def present(self, path, timeout=0):
    """returns True if there is an entity at path"""
    retcode, data = self.sendmess(MSG_PRESENCE, str2bytez(path),
                                  timeout=timeout)
    assert retcode <= 0 and not data, (retcode, data)
    # retcode 0 means the entity exists; a negative value means absent.
    return retcode == 0
def dir(self, path='/', slash=True, bus=False, timeout=0):
    """list entities at path"""
    # The slash variant marks directories with a trailing '/'.
    msg = MSG_DIRALLSLASH if slash else MSG_DIRALL

    # Toggle the bus-return bit on the proxy-wide flag word.
    if bus:
        flags = self.flags | FLG_BUS_RET
    else:
        flags = self.flags & ~FLG_BUS_RET

    retcode, data = self.sendmess(msg, str2bytez(path), flags,
                                  timeout=timeout)
    if retcode < 0:
        raise OwnetError(-retcode, self.errmess[-retcode], path)
    # The payload is a comma-separated list of paths; empty means no entries.
    return bytes2str(data).split(',') if data else []
def read(self, path, size=MAX_PAYLOAD, offset=0, timeout=0):
    """read data at path"""
    # The server refuses oversized reads; fail early with a clear error.
    if size > MAX_PAYLOAD:
        raise ValueError("size cannot exceed %d" % MAX_PAYLOAD)

    retcode, data = self.sendmess(MSG_READ, str2bytez(path),
                                  size=size, offset=offset,
                                  timeout=timeout)
    if retcode < 0:
        raise OwnetError(-retcode, self.errmess[-retcode], path)
    return data
def write(self, path, data, offset=0, timeout=0):
    """write data at path

    path is a string, data binary; it is responsability of the caller
    ensure proper encoding.
    """
    # fixme: check of path type delayed to str2bytez
    if not isinstance(data, (bytes, bytearray, )):
        raise TypeError("'data' argument must be binary")

    retcode, rdata = self.sendmess(MSG_WRITE, str2bytez(path) + data,
                                   size=len(data), offset=offset,
                                   timeout=timeout)
    # A write reply never carries a payload.
    assert not rdata, (retcode, rdata)
    if retcode < 0:
        raise OwnetError(-retcode, self.errmess[-retcode], path)
def sendmess(self, msgtype, payload, flags=0, size=0, offset=0, timeout=0):
    """
    retcode, data = sendmess(msgtype, payload)
    send generic message and returns retcode, data

    Reuses the cached persistent connection when one is available;
    whether the connection is kept for the next call depends on the
    server granting FLG_PERSISTENCE in its reply.
    """
    # reuse last valid connection or create new
    conn = self.conn or self._new_connection()
    # invalidate last connection (it is re-cached below only if the
    # server grants persistence; on error it is simply dropped)
    self.conn = None

    flags |= self.flags
    assert (flags & FLG_PERSISTENCE)
    ret, rflags, data = conn.req(
        msgtype, payload, flags, size, offset, timeout)
    if rflags & FLG_PERSISTENCE:
        # persistence granted, save connection object for reuse
        self.conn = conn
    else:
        # discard connection object
        conn.shutdown()
    return ret, data
def get_customers(self, filter_data=None):
    '''
    Returns all customers.  Sometimes they are too much and cause
    internal server errors on CG.  The API call permits post parameters
    for filtering which tends to fix this:
    https://cheddargetter.com/developers#all-customers

    filter_data
        Will be processed by urlencode and can be used for filtering

        Example value:
        [
            ("subscriptionStatus": "activeOnly"),
            ("planCode[]": "100GB"),
            ("planCode[]": "200GB")
        ]
    '''
    try:
        response = self.client.make_request(path='customers/get',
                                            data=filter_data)
    except NotFound:
        # No customers match the filter: same as an empty result set.
        return []
    if not response:
        return []
    parsed = CustomersParser().parse_xml(response.content)
    return [Customer(product=self, **attrs) for attrs in parsed]
def delete_all_customers(self):
    '''
    This method does exactly what you think it does. Calling this
    method deletes all customer data in your cheddar product and the
    configured gateway. This action cannot be undone.

    DO NOT RUN THIS UNLESS YOU REALLY, REALLY, REALLY MEAN TO!
    '''
    # The current timestamp acts as the confirmation token the API
    # requires for this destructive call.
    confirmation_path = 'customers/delete-all/confirm/%d' % int(time())
    self.client.make_request(path=confirmation_path, method='POST')
def initial_bill_date(self):
    '''
    An estimated initial bill date for an account created today,
    based on available plan info.
    '''
    # Any unit other than 'months' is interpreted as a count of days.
    if self.initial_bill_count_unit == 'months':
        delay = relativedelta(months=self.initial_bill_count)
    else:
        delay = relativedelta(days=self.initial_bill_count)
    return datetime.utcnow().date() + delay
def charge(self, code, each_amount, quantity=1, description=None):
    '''
    Add an arbitrary charge or credit to a customer's account.

    A positive number will create a charge. A negative number will
    create a credit.

    each_amount is normalized to a Decimal with a precision of 2 as
    that is the level of precision which the cheddar API supports.
    '''
    # Quantize to two decimal places, the precision Cheddar accepts.
    amount = Decimal(each_amount).quantize(Decimal('.01'))
    payload = {
        'chargeCode': code,
        'eachAmount': '%.2f' % amount,
        'quantity': quantity,
    }
    if description:
        payload['description'] = description

    response = self.product.client.make_request(
        path='customers/add-charge',
        params={'code': self.code},
        data=payload,
    )
    return self.load_data_from_xml(response.content)
def create_one_time_invoice(self, charges):
    '''
    Charges should be a list of charges to execute immediately.  Each
    entry in the charges list should be a dictionary with the
    following keys:

    code
        Your code for this charge. This code will be displayed in
        the user's invoice and is limited to 36 characters.
    quantity
        A positive integer quantity. If not provided, this value
        will default to 1.
    each_amount
        Positive or negative integer or decimal with two digit
        precision. A positive number will create a charge (debit).
        A negative number will create a credit.
    description
        An optional description for this charge which will be
        displayed on the user's invoice.
    '''
    data = {}
    for i, charge in enumerate(charges):
        # Normalize to the two-decimal precision the Cheddar API accepts.
        each_amount = Decimal(charge['each_amount']).quantize(
            Decimal('.01'))
        data['charges[%d][chargeCode]' % i] = charge['code']
        data['charges[%d][quantity]' % i] = charge.get('quantity', 1)
        data['charges[%d][eachAmount]' % i] = '%.2f' % each_amount
        # Idiomatic membership test (was: `in charge.keys()`).
        if 'description' in charge:
            data['charges[%d][description]' % i] = charge['description']

    response = self.product.client.make_request(
        path='invoices/new',
        params={'code': self.code},
        data=data,
    )
    return self.load_data_from_xml(response.content)
def set(self, quantity):
    '''
    Set the item's quantity to the passed in amount. If nothing is
    passed in, a quantity of 1 is assumed. If a decimal value is
    passed in, it is rounded to the 4th decimal place as that is the
    level of precision which the Cheddar API accepts.
    '''
    customer = self.subscription.customer
    payload = {'quantity': self._normalize_quantity(quantity)}
    response = customer.product.client.make_request(
        path='customers/set-item-quantity',
        params={
            'code': customer.code,
            'itemCode': self.code,
        },
        data=payload,
        method='POST',
    )
    return customer.load_data_from_xml(response.content)
def deep_compare(self, other, settings):
    """
    Compares each field of the name one at a time to see if they match.
    Each name field has context-specific comparison logic.

    :param Name other: other Name for comparison
    :return bool: whether the two names are compatible
    """
    if not self._is_compatible_with(other):
        return False
    # First, middle, and last must all match individually.
    return all(self._compare_components(other, settings))
def ratio_deep_compare(self, other, settings):
    """
    Compares each field of the name one at a time to see if they match.
    Each name field has context-specific comparison logic.

    :param Name other: other Name for comparison
    :return int: sequence ratio match (out of 100)
    """
    if not self._is_compatible_with(other):
        return 0
    scores = self._compare_components(other, settings, True)
    weights = self._determine_weights(other, settings)
    # Weighted average of the per-component match ratios.
    weighted_sum = sum(score * weight
                       for score, weight in zip(scores, weights))
    return weighted_sum / sum(weights)
Return True if names are not incompatible. This checks that the gender of titles and compatibility of suffixes def _is_compatible_with(self, other): """ Return True if names are not incompatible. This checks that the gender of titles and compatibility of suffixes """ title = self._compare_title(other) suffix = self._compare_suffix(other) return title and suffix
Return False if titles have different gender associations def _compare_title(self, other): """Return False if titles have different gender associations""" # If title is omitted, assume a match if not self.title or not other.title: return True titles = set(self.title_list + other.title_list) return not (titles & MALE_TITLES and titles & FEMALE_TITLES)
Return false if suffixes are mutually exclusive def _compare_suffix(self, other): """Return false if suffixes are mutually exclusive""" # If suffix is omitted, assume a match if not self.suffix or not other.suffix: return True # Check if more than one unique suffix suffix_set = set(self.suffix_list + other.suffix_list) unique_suffixes = suffix_set & UNIQUE_SUFFIXES for key in EQUIVALENT_SUFFIXES: if key in unique_suffixes: unique_suffixes.remove(key) unique_suffixes.add(EQUIVALENT_SUFFIXES[key]) return len(unique_suffixes) < 2
def _compare_components(self, other, settings, ratio=False):
    """Return comparison of first, middle, and last components

    :param Name other: other Name for comparison
    :param dict settings: per-component comparison settings
    :param bool ratio: when True, results are fuzzy ratios (0-100)
        instead of booleans
    :return tuple: (first, middle, last) comparison results
    """
    first = compare_name_component(
        self.first_list,
        other.first_list,
        settings['first'],
        ratio,
    )
    if settings['check_nickname']:
        if first is False:
            # Exact first names failed: fall back to nickname matching
            # in either direction.
            first = compare_name_component(
                self.nickname_list, other.first_list,
                settings['first'], ratio
            ) or compare_name_component(
                self.first_list, other.nickname_list,
                settings['first'], ratio
            )
        elif ratio and first != 100:
            # BUGFIX: this used to be `first is not 100`, an *identity*
            # check on an int (unreliable across implementations and a
            # SyntaxWarning on modern CPython); equality is what is
            # meant.  A nickname match may still improve a partial
            # first-name score.
            first = max(
                compare_name_component(
                    self.nickname_list, other.first_list,
                    settings['first'], ratio
                ),
                compare_name_component(
                    self.first_list, other.nickname_list,
                    settings['first'], ratio
                ),
                first,
            )

    middle = compare_name_component(
        self.middle_list,
        other.middle_list,
        settings['middle'],
        ratio,
    )

    last = compare_name_component(
        self.last_list,
        other.last_list,
        settings['last'],
        ratio,
    )

    return first, middle, last
Return weights of name components based on whether or not they were omitted def _determine_weights(self, other, settings): """ Return weights of name components based on whether or not they were omitted """ # TODO: Reduce weight for matches by prefix or initials first_is_used = settings['first']['required'] or \ self.first and other.first first_weight = settings['first']['weight'] if first_is_used else 0 middle_is_used = settings['middle']['required'] or \ self.middle and other.middle middle_weight = settings['middle']['weight'] if middle_is_used else 0 last_is_used = settings['last']['required'] or \ self.last and other.last last_weight = settings['last']['weight'] if last_is_used else 0 return first_weight, middle_weight, last_weight
def init_app(self, app):
    """
    :param app: :class:`sanic.Sanic` instance to rate limit.
    """
    # Read configuration from the app, falling back to (and persisting,
    # via setdefault) the defaults carried by this limiter instance.
    self.enabled = app.config.setdefault(C.ENABLED, True)
    self._swallow_errors = app.config.setdefault(
        C.SWALLOW_ERRORS, self._swallow_errors
    )
    self._storage_options.update(
        app.config.get(C.STORAGE_OPTIONS, {})
    )
    # An explicit storage URI passed to the constructor wins over the
    # app config value.
    self._storage = storage_from_string(
        self._storage_uri
        or app.config.setdefault(C.STORAGE_URL, 'memory://'),
        **self._storage_options
    )
    strategy = (
        self._strategy
        or app.config.setdefault(C.STRATEGY, 'fixed-window')
    )
    if strategy not in STRATEGIES:
        raise ConfigurationError(
            "Invalid rate limiting strategy %s" % strategy)
    self._limiter = STRATEGIES[strategy](self._storage)
    # Config-declared global (app-wide) limits apply only when none
    # were supplied to the constructor.
    conf_limits = app.config.get(C.GLOBAL_LIMITS, None)
    if not self._global_limits and conf_limits:
        self._global_limits = [
            ExtLimit(
                limit, self._key_func, None, False, None, None, None
            ) for limit in parse_many(conf_limits)
        ]
    # Enforce the limits on every incoming request.
    app.request_middleware.append(self.__check_request_limit)
def limit(self, limit_value, key_func=None, per_method=False,
          methods=None, error_message=None, exempt_when=None):
    """
    Decorator for rate limiting an individual route.

    :param limit_value: rate limit string, or a callable that returns
        one. See :ref:`ratelimit-string` for details.
    :param function key_func: function/lambda producing the unique
        identifier to rate limit on; defaults to the remote address
        of the request.
    :param bool per_method: whether the limit is tracked separately
        per HTTP method of the request.
    :param list methods: if given, only these HTTP methods are rate
        limited (default: None, meaning all methods).
    :param error_message: string (or callable that returns one) to
        override the error message used in the response.
    :param exempt_when: callable deciding whether to skip the limit
        for a given request.
    :return: the route decorator
    """
    return self.__limit_decorator(
        limit_value,
        key_func,
        per_method=per_method,
        methods=methods,
        error_message=error_message,
        exempt_when=exempt_when,
    )
def shared_limit(self, limit_value, scope, key_func=None,
                 error_message=None, exempt_when=None):
    """
    Decorator applied to multiple routes that share one rate limit.

    :param limit_value: rate limit string, or a callable that returns
        one. See :ref:`ratelimit-string` for details.
    :param scope: string (or callable that returns one) defining the
        shared rate limiting scope.
    :param function key_func: function/lambda producing the unique
        identifier to rate limit on; defaults to the remote address
        of the request.
    :param error_message: string (or callable that returns one) to
        override the error message used in the response.
    :param exempt_when: callable deciding whether to skip the limit
        for a given request.
    """
    return self.__limit_decorator(
        limit_value,
        key_func,
        True,
        scope,
        error_message=error_message,
        exempt_when=exempt_when,
    )
def reset(self):
    """
    Reset the underlying storage, clearing all recorded limits.

    Storage backends that cannot be reset raise ``NotImplementedError``,
    which is logged as a warning rather than propagated.
    """
    try:
        self._storage.reset()
    except NotImplementedError:
        self.logger.warning("This storage type does not support being reset")
    else:
        self.logger.info("Storage has been reset and all limits cleared")
def get_soup(page=''):
    """Fetch a page of the site and return it parsed as a bs4 object.

    :param page: relative page path under BASE_URL ('' = front page)
    """
    url = '%s/%s' % (BASE_URL, page)
    response = requests.get(url)
    return BeautifulSoup(response.text)
def match(fullname1, fullname2, strictness='default', options=None):
    """
    Takes two names and returns true if they describe the same person.

    :param string fullname1: first human name
    :param string fullname2: second human name
    :param string strictness: named strictness preset to use
    :param dict options: custom overrides merged into the preset
    :return bool: the names match
    """
    settings = SETTINGS[strictness]
    if options is not None:
        # Copy before merging so the shared preset is never mutated.
        settings = deepcopy(settings)
        deep_update_dict(settings, options)
    return Name(fullname1).deep_compare(Name(fullname2), settings)
def ratio(fullname1, fullname2, strictness='default', options=None):
    """
    Score how closely two names describe the same person, using
    difflib sequence matching on a per-field basis.

    :param string fullname1: first human name
    :param string fullname2: second human name
    :param string strictness: named strictness preset to use
    :param dict options: custom overrides merged into the preset
    :return int: sequence ratio match (out of 100)
    """
    settings = SETTINGS[strictness]
    if options is not None:
        # Copy before merging so the shared preset is never mutated.
        settings = deepcopy(settings)
        deep_update_dict(settings, options)
    return Name(fullname1).ratio_deep_compare(Name(fullname2), settings)
def _get_zipped_rows(self, soup):
    """Pair up the story rows of the submissions table.

    Each story occupies two consecutive <tr> rows (rank/title/domain,
    then points/submitter/comments) separated by spacer rows; yields
    (info_row, detail_row) pairs, one per story.
    """
    # The third table on the page holds all submissions.
    table = soup.findChildren('table')[2]
    # Drop the trailing "More" and spacer rows.
    rows = table.findChildren(['tr'])[:-2]
    # Every third row (starting at index 2) is a spacer -- discard them.
    spacer_indices = set(range(2, len(rows), 3))
    kept = [row for i, row in enumerate(rows) if i not in spacer_indices]
    info_rows = kept[::2]     # rank, title, domain
    detail_rows = kept[1::2]  # points, submitter, comments
    return zip(info_rows, detail_rows)
def _build_story(self, all_rows):
    """
    Build a list of Story objects from zipped (info, detail) row pairs.

    :param all_rows: iterable of (info_row, detail_row) <tr> pairs as
        produced by `_get_zipped_rows`
    :return: list of Story instances
    """
    all_stories = []

    for (info, detail) in all_rows:
        #-- Info about a story: rank, title, link, domain --#
        info_cells = info.findAll('td')  # split in 3 cells
        rank = int(info_cells[0].string[:-1])
        title = '%s' % info_cells[2].find('a').string
        link = info_cells[2].find('a').get('href')
        # by default all stories are linking posts
        is_self = False
        # BUG FIX: was `link.find('item?id=') is -1` -- an identity
        # check against an int literal that only worked because CPython
        # caches small ints; use a proper equality test.
        if link.find('item?id=') == -1:
            # external link: slice " (abc.com) " to get the domain
            domain = info_cells[2].findAll('span')[1].string[2:-1]
        else:
            # internal link -> self post
            link = '%s/%s' % (BASE_URL, link)
            domain = BASE_URL
            is_self = True

        #-- Detail about a story: points, submitter, comments --#
        # split in 2 cells, we need only the second
        detail_cell = detail.findAll('td')[1]
        # list of details we need, 5 count
        detail_concern = detail_cell.contents
        num_comments = -1
        if re.match(r'^(\d+)\spoint.*', detail_concern[0].string) is not \
                None:
            # regular post (can be a link or self post)
            points = int(re.match(r'^(\d+)\spoint.*', detail_concern[
                0].string).groups()[0])
            submitter = '%s' % detail_concern[2].string
            submitter_profile = '%s/%s' % (BASE_URL, detail_concern[
                2].get('href'))
            published_time = ' '.join(detail_concern[3].strip().split()[
                :3])
            comment_tag = detail_concern[4]
            story_id = int(re.match(r'.*=(\d+)', comment_tag.get(
                'href')).groups()[0])
            comments_link = '%s/item?id=%d' % (BASE_URL, story_id)
            comment_count = re.match(r'(\d+)\s.*', comment_tag.string)
            try:
                # regex matched, cast to int
                num_comments = int(comment_count.groups()[0])
            except AttributeError:
                # did not match (e.g. "discuss"), assign 0
                num_comments = 0
        else:
            # this is a job post: no points, submitter or comments
            points = 0
            submitter = ''
            submitter_profile = ''
            published_time = '%s' % detail_concern[0]
            comment_tag = ''
            try:
                story_id = int(re.match(r'.*=(\d+)', link).groups()[0])
            except AttributeError:
                # job listing that points to an external link
                story_id = -1
            comments_link = ''
            comment_count = -1

        story = Story(rank, story_id, title, link, domain, points,
                      submitter, published_time, submitter_profile,
                      num_comments, comments_link, is_self)
        all_stories.append(story)

    return all_stories
def get_stories(self, story_type='', limit=30):
    """
    Yield stories from the given page of HN.

    'story_type' can be:
    \t'' = top stories (homepage) (default)
    \t'news2' = page 2 of top stories
    \t'newest' = most recent stories
    \t'best' = best stories

    'limit' is the number of stories wanted; values outside 1..30
    (including None) are clamped to 30.
    """
    if not limit or not (1 <= limit <= 30):
        # we need at least 30 items
        limit = 30

    yielded = 0
    while yielded < limit:
        # fetch and parse the current page, then emit its stories
        soup = get_soup(page=story_type)
        paired_rows = self._get_zipped_rows(soup)
        for story in self._build_story(paired_rows):
            yield story
            yielded += 1
            if yielded == limit:
                return
def get_leaders(self, limit=10):
    """Yield User objects for the Hacker News leaders page.

    :param limit: maximum number of leaders to yield (default 10).
    """
    if limit is None:
        limit = 10
    soup = get_soup('leaders')
    outer = soup.find('table')
    leaders_table = outer.find_all('table')[1]
    leader_rows = leaders_table.find_all('tr')[2:]
    # Remove an empty placeholder row present on the Leaders page.
    leader_rows.pop(10)
    for index, row in enumerate(leader_rows):
        if index == limit:
            return
        if row.text == '':
            continue
        cells = row.find_all('td')
        yield User(cells[1].text, '', cells[2].text, cells[3].text)
def _get_next_page(self, soup, current_page):
    """
    Return the relative url of the next page (the "More" link at the
    bottom of the page), or None when there is no next page.
    """
    # Get the table with all the comments:
    if current_page == 1:
        table = soup.findChildren('table')[3]
    elif current_page > 1:
        table = soup.findChildren('table')[2]
    # the last row of the table contains the relative url of the next page
    anchor = table.findChildren(['tr'])[-1].find('a')
    if anchor and anchor.text == u'More':
        href = anchor.get('href')
        # BUG FIX: the original used href.lstrip(BASE_URL), but lstrip
        # strips any leading *characters* found in BASE_URL (so e.g.
        # 'news2' would lose its leading letters), not the BASE_URL
        # prefix. Strip the prefix explicitly instead.
        if href.startswith(BASE_URL):
            href = href[len(BASE_URL):]
        return href
    else:
        return None
def _build_comments(self, soup):
    """
    For the story, builds and returns a flat list of Comment objects,
    walking every page of comments via the "More" link.

    :param soup: parsed BeautifulSoup of the story's first comment page
    :return: list of Comment instances in page order
    """
    comments = []
    current_page = 1

    while True:
        # Get the table holding all comments (its index differs between
        # the first page and subsequent pages):
        if current_page == 1:
            table = soup.findChildren('table')[3]
        elif current_page > 1:
            table = soup.findChildren('table')[2]

        # get all rows (each comment is duplicated twice)
        rows = table.findChildren(['tr'])
        # last row is "More", second last is spacing
        rows = rows[:len(rows) - 2]
        # keep the even rows only -- now we have unique comments
        rows = [row for i, row in enumerate(rows) if (i % 2 == 0)]

        if len(rows) > 1:
            for row in rows:
                # skip an empty td
                if not row.findChildren('td'):
                    continue

                # Builds a flat list of comments.
                # Indentation image width encodes nesting: 40px per level,
                # so level starts at 0.
                level = int(row.findChildren('td')[1].find('img').get(
                    'width')) // 40

                spans = row.findChildren('td')[3].findAll('span')
                # span[0] = submitter details
                # [<a href="user?id=jonknee">jonknee</a>, u' 1 hour ago | ', <a href="item?id=6910978">link</a>]
                # span[1] = actual comment
                if str(spans[0]) != '<span class="comhead"></span>':
                    # user who submitted the comment
                    user = spans[0].contents[0].string
                    # relative time of comment
                    time_ago = spans[0].contents[1].string.strip(
                    ).rstrip(' |')
                    # the "link" anchor may carry a relative or an
                    # absolute href, hence the two regex attempts
                    try:
                        comment_id = int(re.match(r'item\?id=(.*)',
                                                  spans[0].contents[
                                                      2].get(
                                                      'href')).groups()[0])
                    except AttributeError:
                        comment_id = int(re.match(r'%s/item\?id=(.*)' %
                                                  BASE_URL,
                                                  spans[0].contents[
                                                      2].get(
                                                      'href')).groups()[0])
                    # text representation of comment (unformatted)
                    body = spans[1].text
                    # NOTE(review): trims a trailing "reply" marker;
                    # the -2/-5 offsets look markup-specific -- confirm
                    # against the live page format.
                    if body[-2:] == '--':
                        body = body[:-5]
                    # html of comment, may not be valid; the closing
                    # markup differs when a "reply" footer is present
                    try:
                        pat = re.compile(
                            r'<span class="comment"><font color=".*">(.*)</font></span>')
                        body_html = re.match(pat, str(spans[1]).replace(
                            '\n', '')).groups()[0]
                    except AttributeError:
                        pat = re.compile(
                            r'<span class="comment"><font color=".*">(.*)</font></p><p><font size="1">')
                        body_html = re.match(pat, str(spans[1]).replace(
                            '\n', '')).groups()[0]
                else:
                    # comment deleted
                    user = ''
                    time_ago = ''
                    comment_id = -1
                    body = '[deleted]'
                    body_html = '[deleted]'

                comment = Comment(comment_id, level, user, time_ago,
                                  body, body_html)
                comments.append(comment)

        # Move on to the next page of comments, or exit the loop if there
        # is no next page.
        next_page_url = self._get_next_page(soup, current_page)
        if not next_page_url:
            break
        soup = get_soup(page=next_page_url)
        current_page += 1

    # Dead code kept for reference: a disabled pass that would have
    # folded child comments into their parents' bodies.
    previous_comment = None
    # for comment in comments:
    #     if comment.level == 0:
    #         previous_comment = comment
    #     else:
    #         level_difference = comment.level - previous_comment.level
    #         previous_comment.body_html += '\n' + '\t' * level_difference \
    #             + comment.body_html
    #         previous_comment.body += '\n' + '\t' * level_difference + \
    #             comment.body
    return comments
def fromid(self, item_id):
    """
    Initializes an instance of Story for given item_id.

    It is assumed that the story referenced by item_id is valid
    and does not raise any HTTP errors.

    :param item_id: int id of the story to fetch
    :return: a populated Story instance (rank is -1, since rank is
        only meaningful on listing pages)
    :raises Exception: if item_id is falsy
    """
    if not item_id:
        raise Exception('Need an item_id for a story')
    # get details about a particular story
    soup = get_item_soup(item_id)

    # this post has not been scraped, so we explicitly get all info
    story_id = item_id
    rank = -1

    # to extract meta information about the post
    info_table = soup.findChildren('table')[2]
    # [0] = title, domain, [1] = points, user, time, comments
    info_rows = info_table.findChildren('tr')

    # title, domain
    title_row = info_rows[0].findChildren('td')[1]
    title = title_row.find('a').text
    try:
        # slice the surrounding " (...) " off the domain span
        domain = title_row.find('span').string[2:-2]
        # domain found
        is_self = False
        link = title_row.find('a').get('href')
    except AttributeError:
        # no domain span -> self post
        domain = BASE_URL
        is_self = True
        link = '%s/item?id=%s' % (BASE_URL, item_id)

    # points, user, time, comments
    meta_row = info_rows[1].findChildren('td')[1].contents
    # [<span id="score_7024626">789 points</span>, u' by ', <a href="user?id=endianswap">endianswap</a>,
    # u' 8 hours ago | ', <a href="item?id=7024626">238 comments</a>]

    points = int(re.match(r'^(\d+)\spoint.*', meta_row[0].text).groups()[0])
    submitter = meta_row[2].text
    submitter_profile = '%s/%s' % (BASE_URL, meta_row[2].get('href'))
    published_time = ' '.join(meta_row[3].strip().split()[:3])
    comments_link = '%s/item?id=%s' % (BASE_URL, item_id)
    try:
        num_comments = int(re.match(r'(\d+)\s.*', meta_row[
            4].text).groups()[0])
    except AttributeError:
        # "discuss" (no comments yet) does not match the regex
        num_comments = 0

    story = Story(rank, story_id, title, link, domain, points, submitter,
                  published_time, submitter_profile, num_comments,
                  comments_link, is_self)
    return story
def compare_name_component(list1, list2, settings, use_ratio=False):
    """
    Compare two lists of names from one name component using `settings`.

    Missing components pass (score 100 / True) unless the component is
    required; lists of different lengths never match.
    """
    if not list1[0] or not list2[0]:
        # Component absent on one side: pass iff it is not required.
        passes = not settings['required']
        return passes * 100 if use_ratio else passes
    if len(list1) != len(list2):
        return False

    if use_ratio:
        return _ratio_compare(list1, list2, settings)
    return _normal_compare(list1, list2, settings)
def equate_initial(name1, name2):
    """
    Return True when the names are equal, or when either one is a
    single-letter initial matching the other's first letter; empty
    strings never match.
    """
    if not name1 or not name2:
        return False
    if min(len(name1), len(name2)) == 1:
        # At least one side is an initial: compare first letters only.
        return name1[0] == name2[0]
    return name1 == name2
def equate_prefix(name1, name2):
    """
    Return True when the names match exactly or either name is a
    prefix of the other; empty strings never match.
    """
    if not name1 or not name2:
        return False
    return name1.startswith(name2) or name2.startswith(name1)
def equate_nickname(name1, name2):
    """
    Evaluate whether names match after common nickname suffixes are
    reduced ('-ie'/'-y' after a doubled letter, e.g. 'bobby' -> 'bob').

    This is not currently used in any name comparison.
    """
    # Collapse a doubled letter followed by 'y' or 'ie' down to the
    # single root letter.
    suffix_pattern = r'(.)\1(y|ie)$'
    name1 = re.sub(suffix_pattern, r'\1', name1)
    name2 = re.sub(suffix_pattern, r'\1', name2)
    return equate_prefix(name1, name2)
def make_ascii(word):
    """
    Return `word` with unicode-specific characters folded to their
    closest ascii equivalents (unmappable characters are dropped).
    """
    # `unicode` exists only on Python 2; the unchosen branch of the
    # conditional is never evaluated, so this is safe on Python 3.
    text_type = unicode if sys.version_info < (3, 0, 0) else str
    decomposed = unicodedata.normalize('NFKD', text_type(word))
    return decomposed.encode('ascii', 'ignore').decode('utf-8')
def seq_ratio(word1, word2):
    """Return the difflib sequence-match ratio of two words as an
    integer in 0..100."""
    matcher = SequenceMatcher(None, word1, word2)
    return int(round(matcher.ratio() * 100))
def deep_update_dict(default, options):
    """
    Recursively merge `options` into `default` in place; keys absent
    from `options` keep their existing values in `default`.
    """
    for key, new_value in options.items():
        current = default.get(key)
        if isinstance(current, dict):
            # Existing nested dict: merge into it rather than replace.
            deep_update_dict(current, new_value)
        else:
            default[key] = new_value
def template_to_base_path(template, google_songs):
    """Get the common base output path for a list of songs to download.

    Returns the current directory for the default/suggested template;
    otherwise the deepest directory shared by every resolved song path.
    """
    if template in (os.getcwd(), '%suggested%'):
        return os.getcwd()
    resolved = os.path.abspath(template)
    song_paths = [
        template_to_filepath(resolved, song) for song in google_songs
    ]
    return os.path.dirname(os.path.commonprefix(song_paths))
def random(cls, length, bit_prob=.5):
    """Create a random bit string of the given length, where each bit
    is set with probability bit_prob (default .5).

    Usage:
        # Create a random BitString of length 10 with mostly zeros.
        bits = BitString.random(10, bit_prob=.1)

    Arguments:
        length: An int >= 0, the desired length of the result.
        bit_prob: A float in [0, 1], the probability of any given bit
            being 1; default .5 makes 0 and 1 equally likely.

    Return:
        A randomly generated BitString instance of the requested length.
    """
    assert isinstance(length, int) and length >= 0
    assert isinstance(bit_prob, (int, float)) and 0 <= bit_prob <= 1

    bits = 0
    for _ in range(length):
        # Shift the accumulated bits up and append one new random bit.
        bits = (bits << 1) | int(random.random() < bit_prob)
    return cls(bits, length)
def crossover_template(cls, length, points=2):
    """Create a crossover template with the given number of points.

    The crossover template can be used as a mask to crossover two
    bitstrings of the same length.

    Usage:
        assert len(parent1) == len(parent2)
        template = BitString.crossover_template(len(parent1))
        inv_template = ~template
        child1 = (parent1 & template) | (parent2 & inv_template)
        child2 = (parent1 & inv_template) | (parent2 & template)

    Arguments:
        length: An int, indicating the desired length of the result.
        points: An int, the number of crossover points.

    Return:
        A BitString instance of the requested length which can be used
        as a crossover template.
    """
    assert isinstance(length, int) and length >= 0
    assert isinstance(points, int) and points >= 0

    # Select the crossover points. NOTE: `points` is rebound here from
    # a count to the actual list of sampled positions.
    points = random.sample(range(length + 1), points)

    # Prep the points for the loop: sort ascending and append the end
    # of the string as a final sentinel point. (If `length` was itself
    # sampled, the duplicate is harmless because the loop below skips
    # zero-width ranges via the `point > previous` guard.)
    points.sort()
    points.append(length)

    # Fill the bits in with alternating ranges of 0 and 1 according to
    # the selected crossover points; whether the first range is 1s is
    # itself chosen at random.
    previous = 0
    include_range = bool(random.randrange(2))
    bits = 0
    for point in points:
        if point > previous:
            bits <<= point - previous
            if include_range:
                # Set all (point - previous) bits of this range.
                bits += (1 << (point - previous)) - 1
            include_range = not include_range
        previous = point

    return cls(bits, length)
def count(self):
    """Return the number of bits set to True in the bit string.

    Usage:
        assert BitString('00110').count() == 2

    Return:
        An int, the number of bits with value 1.
    """
    total = 0
    remaining = self._bits
    while remaining:
        # Kernighan's trick: clearing the lowest set bit each pass
        # loops once per 1 bit instead of once per bit position.
        remaining &= remaining - 1
        total += 1
    return total
def drain_events(self, allowed_methods=None, timeout=None):
    """Wait for an event on any channel.

    :param allowed_methods: optional list of method signatures to wait
        for, forwarded to `wait_multi`.
    :param timeout: optional timeout in seconds, forwarded to
        `wait_multi`.
    """
    # BUG FIX: `allowed_methods` was accepted but silently dropped;
    # forward it so callers can actually restrict the awaited methods.
    return self.wait_multi(self.channels.values(),
                           allowed_methods=allowed_methods,
                           timeout=timeout)
def wait_multi(self, channels, allowed_methods=None, timeout=None):
    """Wait for an event on any of the given channels and dispatch it.

    :param channels: iterable of channel objects to wait on.
    :param allowed_methods: optional restriction on which AMQP method
        signatures to wait for (forwarded to `_wait_multiple`).
    :param timeout: optional timeout in seconds.
    :return: the result of the channel's handler for the received
        method.
    :raises Exception: if the received method signature has no handler
        in the channel's method map.
    """
    # Map channel ids back to channel objects so the received event can
    # be routed to the channel it arrived on.
    chanmap = dict((chan.channel_id, chan) for chan in channels)
    chanid, method_sig, args, content = self._wait_multiple(
        chanmap.keys(), allowed_methods, timeout=timeout)

    channel = chanmap[chanid]

    # Best-effort body decode: a failure leaves the body as raw bytes
    # rather than aborting the event.
    if content \
            and channel.auto_decode \
            and hasattr(content, 'content_encoding'):
        try:
            content.body = content.body.decode(content.content_encoding)
        except Exception:
            pass

    # Dispatch via the channel's method table.
    amqp_method = channel._METHOD_MAP.get(method_sig, None)

    if amqp_method is None:
        raise Exception('Unknown AMQP method (%d, %d)' % method_sig)

    if content is None:
        return amqp_method(channel, args)
    else:
        return amqp_method(channel, args, content)
def establish_connection(self):
    """Establish connection to the AMQP broker.

    Validates that hostname, userid and password are configured,
    fills in the backend's default port when none is set, and returns
    a new broker Connection.

    :raises KeyError: if hostname, userid or password is missing.
    """
    conninfo = self.connection
    # These three cannot be defaulted -- fail fast if absent.
    if not conninfo.hostname:
        raise KeyError("Missing hostname for AMQP connection.")

    if conninfo.userid is None:
        raise KeyError("Missing user id for AMQP connection.")

    if conninfo.password is None:
        raise KeyError("Missing password for AMQP connection.")

    # Fall back to the backend's default port when none is configured.
    if not conninfo.port:
        conninfo.port = self.default_port
    return Connection(host=conninfo.host,
                      userid=conninfo.userid,
                      password=conninfo.password,
                      virtual_host=conninfo.virtual_host,
                      insist=conninfo.insist,
                      ssl=conninfo.ssl,
                      connect_timeout=conninfo.connect_timeout)
def queue_exists(self, queue):
    """Check if a queue has been declared.

    :param queue: name of the queue to check.
    :rtype bool:
    """
    try:
        # A passive declare only checks for existence; the broker
        # raises a channel error (reply code 404) when the queue is
        # missing.
        self.channel.queue_declare(queue=queue, passive=True)
    except AMQPChannelException as exc:
        # FIX: `except AMQPChannelException, e` was Python 2-only
        # syntax; `as` works on Python 2.6+ and 3.
        if exc.amqp_reply_code == 404:
            return False
        # Bare raise re-raises with the original traceback intact
        # (unlike `raise e`, which rebuilds it on Python 2).
        raise
    else:
        return True
def queue_delete(self, queue, if_unused=False, if_empty=False):
    """Delete the named queue.

    :param queue: name of the queue to delete.
    :param if_unused: only delete when the queue has no consumers.
    :param if_empty: only delete when the queue has no messages.
    """
    channel = self.channel
    return channel.queue_delete(queue, if_unused, if_empty)
def queue_declare(self, queue, durable, exclusive, auto_delete,
                  warn_if_exists=False, arguments=None):
    """Declare a named queue on the current channel.

    :param queue: queue name.
    :param durable: whether the queue survives broker restarts.
    :param exclusive: whether the queue is restricted to this
        connection.
    :param auto_delete: whether the queue is removed once unused.
    :param warn_if_exists: emit a QueueAlreadyExistsWarning when the
        queue was already declared.
    :param arguments: optional broker-specific declaration arguments.
    """
    if warn_if_exists and self.queue_exists(queue):
        warnings.warn(QueueAlreadyExistsWarning(
            QueueAlreadyExistsWarning.__doc__))

    return self.channel.queue_declare(queue=queue,
                                      durable=durable,
                                      exclusive=exclusive,
                                      auto_delete=auto_delete,
                                      arguments=arguments)
def exchange_declare(self, exchange, type, durable, auto_delete):
    """Declare a named exchange on the current channel.

    :param exchange: exchange name.
    :param type: exchange type string (note: shadows the builtin, but
        the parameter name is kept for interface compatibility).
    :param durable: whether the exchange survives broker restarts.
    :param auto_delete: whether the exchange is removed once unused.
    """
    return self.channel.exchange_declare(exchange=exchange,
                                         type=type,
                                         durable=durable,
                                         auto_delete=auto_delete)
def queue_bind(self, queue, exchange, routing_key, arguments=None):
    """Bind a queue to an exchange using a routing key.

    :param queue: queue name.
    :param exchange: exchange name.
    :param routing_key: routing key for the binding.
    :param arguments: optional broker-specific binding arguments.
    """
    return self.channel.queue_bind(queue=queue,
                                   exchange=exchange,
                                   routing_key=routing_key,
                                   arguments=arguments)