Dataset columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, values 52.6k-1.16M).
Reads artifact definitions from a file-like object. Args: file_object (file): file-like object to read from. Yields: ArtifactDefinition: an artifact definition. Raises: FormatError: if the format of the JSON artifact definition is not set or incorrect.
def ReadFileObject(self, file_object): # TODO: add try, except? json_definitions = json.loads(file_object.read()) last_artifact_definition = None for json_definition in json_definitions: try: artifact_definition = self.ReadArtifactDefinitionValues(json_definition) except errors.FormatError as exception: error_location = 'At start' if last_artifact_definition: error_location = 'After: {0:s}'.format(last_artifact_definition.name) raise errors.FormatError( '{0:s} {1!s}'.format(error_location, exception)) yield artifact_definition last_artifact_definition = artifact_definition
307,119
Reads artifact definitions from a file-like object. Args: file_object (file): file-like object to read from. Yields: ArtifactDefinition: an artifact definition. Raises: FormatError: if the format of the YAML artifact definition is not set or incorrect.
def ReadFileObject(self, file_object): # TODO: add try, except? yaml_generator = yaml.safe_load_all(file_object) last_artifact_definition = None for yaml_definition in yaml_generator: try: artifact_definition = self.ReadArtifactDefinitionValues(yaml_definition) except errors.FormatError as exception: error_location = 'At start' if last_artifact_definition: error_location = 'After: {0:s}'.format(last_artifact_definition.name) raise errors.FormatError( '{0:s} {1!s}'.format(error_location, exception)) yield artifact_definition last_artifact_definition = artifact_definition
307,120
Writes artifact definitions to a file. Args: artifacts (list[ArtifactDefinition]): artifact definitions to be written. filename (str): name of the file to write artifacts to.
def WriteArtifactsFile(self, artifacts, filename): with open(filename, 'w') as file_object: file_object.write(self.FormatArtifacts(artifacts))
307,121
Formats artifacts to desired output format. Args: artifacts (list[ArtifactDefinition]): artifact definitions. Returns: str: formatted string of artifact definition.
def FormatArtifacts(self, artifacts): artifact_definitions = [artifact.AsDict() for artifact in artifacts] json_data = json.dumps(artifact_definitions) return json_data
307,122
Formats artifacts to desired output format. Args: artifacts (list[ArtifactDefinition]): artifact definitions. Returns: str: formatted string of artifact definition.
def FormatArtifacts(self, artifacts): # TODO: improve output formatting of yaml artifact_definitions = [artifact.AsDict() for artifact in artifacts] yaml_data = yaml.safe_dump_all(artifact_definitions) return yaml_data
307,123
Initializes an artifact definition. Args: name (str): name that uniquely identifies the artifact definition. description (Optional[str]): description of the artifact definition.
def __init__(self, name, description=None): super(ArtifactDefinition, self).__init__() self.conditions = [] self.description = description self.name = name self.labels = [] self.provides = [] self.sources = [] self.supported_os = [] self.urls = []
307,124
Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object
def from_b58check(private_key): b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big'))
307,592
Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object.
def from_int(i): point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point)
307,598
Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, if it exists. None otherwise.
def from_signature(message, signature): if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None
307,600
Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.). Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature. address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise.
def verify_bitcoin(message, signature, address): magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") ver, h160 = address_to_key_hash(address) hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig)
307,601
Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string
def address(self, compressed=True, testnet=False): version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii')
307,603
Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature.
def from_der(der): d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s)
307,606
Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length
def from_bytes(b): if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s)
307,607
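A minimal stdlib-only sketch of the r||s split performed by from_bytes above; the 64-byte input is fabricated for illustration and is not a real signature.

raw = bytes(range(1, 33)) + bytes(range(33, 65))  # hypothetical 64-byte r||s blob
r = int.from_bytes(raw[0:32], 'big')   # first 32 bytes -> r component
s = int.from_bytes(raw[32:64], 'big')  # last 32 bytes -> s component
print(len(raw), hex(r)[:12], hex(s)[:12])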
Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized.
def from_bytes(b): if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv
307,612
Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key.
def to_b58check(self, testnet=False): b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b)
307,617
Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered.
def master_key_from_entropy(passphrase='', strength=128): if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = rand_bytes(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n
307,619
Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key.
def master_key_from_seed(seed): S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0)
307,620
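A stdlib-only sketch of the HMAC-SHA512 split that master_key_from_seed performs, using the standard BIP-32 "Bitcoin seed" HMAC key; the seed below is just an example value, not a real wallet seed.

import hashlib
import hmac

seed = bytes.fromhex('000102030405060708090a0b0c0d0e0f')  # example seed only
digest = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
candidate_key = int.from_bytes(digest[:32], 'big')  # left half: private key candidate
chain_code = digest[32:]                            # right half: chain code
print(hex(candidate_key), chain_code.hex())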
Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_key (HDPrivateKey): the parent private key. i (int): index of the child key to derive. Returns: HDPrivateKey: the derived child key, or None if derivation yields an invalid key.
def from_parent(parent_key, i): if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint)
307,621
Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string
def address(self, compressed=True, testnet=False): return self._key.address(True, testnet)
307,629
Fit :class:`Extractor` features and model to a training dataset. Args: documents (List[str]) labels (``np.ndarray``) weights (``np.ndarray``) Returns: :class:`Extractor`
def fit(self, documents, labels, weights=None): block_groups = np.array([self.blockifier.blockify(doc) for doc in documents]) mask = [self._has_enough_blocks(blocks) for blocks in block_groups] block_groups = block_groups[mask] labels = np.concatenate(np.array(labels)[mask]) # TODO: This only 'fit's one doc at a time. No feature fitting actually # happens for now, but this might be important if the features change features_mat = np.concatenate([self.features.fit_transform(blocks) for blocks in block_groups]) if weights is None: self.model.fit(features_mat, labels) else: weights = np.concatenate(np.array(weights)[mask]) self.model.fit(features_mat, labels, sample_weight=weights) return self
308,882
Gather the html, labels, and weights of many files' data. Primarily useful for training/testing an :class:`Extractor`. Args: data: Output of :func:`dragnet.data_processing.prepare_all_data`. Returns: Tuple[List[str], np.array(int), np.array(int)]: All HTML strings, all labels, and all weights, respectively.
def get_html_labels_weights(self, data): all_html = [] all_labels = [] all_weights = [] for html, content, comments in data: all_html.append(html) labels, weights = self._get_labels_and_weights( content, comments) all_labels.append(labels) all_weights.append(weights) return np.array(all_html), np.array(all_labels), np.array(all_weights)
308,883
Predict class (content=1 or not-content=0) of the blocks in one or many HTML document(s). Args: documents (str or List[str]): HTML document(s) Returns: ``np.ndarray`` or List[``np.ndarray``]: array of binary predictions for content (1) or not-content (0).
def predict(self, documents, **kwargs): if isinstance(documents, (str, bytes, unicode_, np.unicode_)): return self._predict_one(documents, **kwargs) else: return np.concatenate([self._predict_one(doc, **kwargs) for doc in documents])
308,887
Predict class (content=1 or not-content=0) of each block in an HTML document. Args: document (str): HTML document Returns: ``np.ndarray``: array of binary predictions for content (1) or not-content (0).
def _predict_one(self, document, encoding=None, return_blocks=False): # blockify blocks = self.blockifier.blockify(document, encoding=encoding) # get features try: features = self.features.transform(blocks) except ValueError: # Can't make features, predict no content preds = np.zeros((len(blocks))) # make predictions else: if self.prob_threshold is None: preds = self.model.predict(features) else: self._positive_idx = ( self._positive_idx or list(self.model.classes_).index(1)) preds = self.model.predict_proba(features) > self.prob_threshold preds = preds[:, self._positive_idx].astype(int) if return_blocks: return preds, blocks else: return preds
308,888
Evaluate the performance of an extractor model's binary classification predictions, typically at the block level, of whether a block is content or not. Args: y_true (``np.ndarray``) y_pred (``np.ndarray``) weights (``np.ndarray``) Returns: Dict[str, float]
def evaluate_model_predictions(y_true, y_pred, weights=None): if isinstance(y_pred[0], np.ndarray): y_pred = np.concatenate(y_pred) if isinstance(y_true[0], np.ndarray): y_true = np.concatenate(y_true) if (weights is not None) and (isinstance(weights[0], np.ndarray)): weights = np.concatenate(weights) accuracy = accuracy_score( y_true, y_pred, normalize=True, sample_weight=weights) precision = precision_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) recall = recall_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) f1 = f1_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}
308,896
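A hedged usage sketch computing the same four metrics on toy arrays; it assumes scikit-learn is installed and mirrors the calls made by evaluate_model_predictions above.

import numpy as np
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0])
print({'accuracy': accuracy_score(y_true, y_pred),
       'precision': precision_score(y_true, y_pred, average='binary', pos_label=1),
       'recall': recall_score(y_true, y_pred, average='binary', pos_label=1),
       'f1': f1_score(y_true, y_pred, average='binary', pos_label=1)})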
Read the HTML file corresponding to identifier ``fileroot`` in the raw HTML directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) encoding (str) Returns: str
def read_html_file(data_dir, fileroot, encoding=None): fname = os.path.join( data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT) encodings = (encoding,) if encoding else ('utf-8', 'iso-8859-1') # 'utf-16' for encoding in encodings: try: with io.open(fname, mode='rt', encoding=encoding) as f: raw_html = f.read() break except (UnicodeDecodeError, UnicodeError): raw_html = None return ftfy.fix_encoding(raw_html).strip()
308,906
Read the gold standard content file corresponding to identifier ``fileroot`` in the gold standard directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) encoding (str) cetr (bool): if True, assume no comments and parse the gold standard to remove tags Returns: List[str, str]: contents string and comments string, respectively
def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False): fname = os.path.join( data_dir, GOLD_STANDARD_DIRNAME, fileroot + GOLD_STANDARD_EXT) encodings = (encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1') for encoding in encodings: try: with io.open(fname, mode='rt', encoding=encoding) as f: gold_standard = f.read() break except (UnicodeDecodeError, UnicodeError): gold_standard = None if not gold_standard: return [u'', u''] if not cetr: content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1) # if no comments delimiter found, append empty comments string if len(content_comments) == 1: content_comments = [content_comments[0], u''] else: tree = etree.fromstring(gold_standard, parser=etree.HTMLParser()) content_comments = [u' '.join(text_from_subtree(tree)), u''] # fix text in case of mangled encodings content_comments = [ftfy.fix_encoding(content_comments[0]).strip(), ftfy.fix_encoding(content_comments[1]).strip()] return content_comments
308,907
Read the gold standard blocks file corresponding to identifier ``fileroot`` in the gold standard blocks directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) split_blocks (bool): If True, split the file's content into blocks. Returns: str or List[str]
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True): fname = os.path.join( data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT) with io.open(fname, mode='r') as f: data = f.read() if split_blocks: return filter(None, data[:-1].split('\n')) return filter(None, data)
308,908
Prepare data for all HTML + gold standard blocks examples in ``data_dir``. Args: data_dir (str) block_pct_tokens_thresh (float): must be in [0.0, 1.0] Returns: List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]] See Also: :func:`prepare_data`
def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1): gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME) gs_blocks_filenames = get_filenames( gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT)) gs_blocks_fileroots = ( re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1) for gs_blocks_filename in gs_blocks_filenames) return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh) for fileroot in gs_blocks_fileroots]
308,911
Load a pickled ``Extractor`` model from disk. Args: filename (str): Name of pickled model file under ``dirname``. dirname (str): Name of directory on disk containing the pickled model. If None, dragnet's default pickled model directory is used: /path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION] Returns: :class:`dragnet.extractor.Extractor`
def load_pickled_model(filename, dirname=None): if dirname is None: pkg_filename = pkgutil.get_loader('dragnet').get_filename('dragnet') pkg_dirname = os.path.dirname(pkg_filename) dirname = os.path.join(pkg_dirname, 'pickled_models', model_path) filepath = os.path.join(dirname, filename) return joblib.load(filepath)
308,926
Do the TTS API request and write bytes to a file-like object. Args: fp (file object): Any file-like object to write the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request. TypeError: When ``fp`` is not a file-like object that takes bytes.
def write_to_fp(self, fp): # When disabling ssl verify in requests (for proxies and firewalls), # urllib3 prints an insecure warning on stdout. We disable that. urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) text_parts = self._tokenize(self.text) log.debug("text_parts: %i", len(text_parts)) assert text_parts, 'No text to send to TTS API' for idx, part in enumerate(text_parts): try: # Calculate token part_tk = self.token.calculate_token(part) except requests.exceptions.RequestException as e: # pragma: no cover log.debug(str(e), exc_info=True) raise gTTSError( "Connection error during token calculation: %s" % str(e)) payload = {'ie': 'UTF-8', 'q': part, 'tl': self.lang, 'ttsspeed': self.speed, 'total': len(text_parts), 'idx': idx, 'client': 'tw-ob', 'textlen': _len(part), 'tk': part_tk} log.debug("payload-%i: %s", idx, payload) try: # Request r = requests.get(self.GOOGLE_TTS_URL, params=payload, headers=self.GOOGLE_TTS_HEADERS, proxies=urllib.request.getproxies(), verify=False) log.debug("headers-%i: %s", idx, r.request.headers) log.debug("url-%i: %s", idx, r.request.url) log.debug("status-%i: %s", idx, r.status_code) r.raise_for_status() except requests.exceptions.HTTPError: # Request successful, bad response raise gTTSError(tts=self, response=r) except requests.exceptions.RequestException as e: # pragma: no cover # Request failed raise gTTSError(str(e)) try: # Write for chunk in r.iter_content(chunk_size=1024): fp.write(chunk) log.debug("part-%i written to %s", idx, fp) except (AttributeError, TypeError) as e: raise TypeError( "'fp' is not a file-like object or it does not take bytes: %s" % str(e))
309,286
Do the TTS API request and write result to file. Args: savefile (string): The path and file name to save the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request.
def save(self, savefile): with open(str(savefile), 'wb') as f: self.write_to_fp(f) log.debug("Saved to %s", savefile)
309,287
Run each regex substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied.
def run(self, text): for regex in self.regexes: text = regex.sub(self.repl, text) return text
309,294
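A small stand-alone sketch of the sequential substitution loop in run(); the patterns and replacement below are made-up stand-ins for whatever the pre-processor object actually holds.

import re

regexes = [re.compile(r'(?<=Mr)\.'), re.compile(r'(?<=Dr)\.')]  # hypothetical patterns
repl = ''  # the single replacement applied by every regex, as in run()
text = 'Mr. Smith met Dr. Jones.'
for regex in regexes:
    text = regex.sub(repl, text)
print(text)  # Mr Smith met Dr Jones.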
Run each substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied.
def run(self, text): for pp in self.pre_processors: text = pp.run(text) return text
309,297
Calculate ROUGE scores between each pair of lines (hyp_file[i], ref_file[i]). Args: * hyp_path: hypothesis file path * ref_path: references file path * avg (False): whether to return average scores or a list of per-pair scores
def get_scores(self, avg=False, ignore_empty=False): hyp_path, ref_path = self.hyp_path, self.ref_path with io.open(hyp_path, encoding="utf-8", mode="r") as hyp_file: hyps = [line[:-1] for line in hyp_file] with io.open(ref_path, encoding="utf-8", mode="r") as ref_file: refs = [line[:-1] for line in ref_file] return self.rouge.get_scores(hyps, refs, avg=avg, ignore_empty=ignore_empty)
309,306
Handle the orelse part of an if or try node. Args: orelse(list[Node]) test(Node) Returns: The last nodes of the orelse branch.
def handle_or_else(self, orelse, test): if isinstance(orelse[0], ast.If): control_flow_node = self.visit(orelse[0]) # Prefix the if label with 'el' control_flow_node.test.label = 'el' + control_flow_node.test.label test.connect(control_flow_node.test) return control_flow_node.last_nodes else: else_connect_statements = self.stmt_star_handler( orelse, prev_node_to_avoid=self.nodes[-1] ) test.connect(else_connect_statements.first_statement) return else_connect_statements.last_statements
310,434
Save the local scope before entering a function call by saving all the LHS's of assignments so far. Args: line_number(int): Of the def of the function call about to be entered into. saved_function_call_index(int): Unique number for each call. Returns: saved_variables(list[SavedVariable]) first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.
def save_local_scope( self, line_number, saved_function_call_index ): saved_variables = list() saved_variables_so_far = set() first_node = None # Make e.g. save_N_LHS = assignment.LHS for each AssignmentNode for assignment in [node for node in self.nodes if (type(node) == AssignmentNode or type(node) == AssignmentCallNode or type(node) == BBorBInode)]: # type() is used on purpose here if assignment.left_hand_side in saved_variables_so_far: continue saved_variables_so_far.add(assignment.left_hand_side) save_name = 'save_{}_{}'.format(saved_function_call_index, assignment.left_hand_side) previous_node = self.nodes[-1] saved_scope_node = RestoreNode( save_name + ' = ' + assignment.left_hand_side, save_name, [assignment.left_hand_side], line_number=line_number, path=self.filenames[-1] ) if not first_node: first_node = saved_scope_node self.nodes.append(saved_scope_node) # Save LHS saved_variables.append(SavedVariable(LHS=save_name, RHS=assignment.left_hand_side)) self.connect_if_allowed(previous_node, saved_scope_node) return (saved_variables, first_node)
310,467
Visits the nodes of a user defined function. Args: definition(LocalModuleDefinition): Definition of the function being added. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function. Returns: the_new_nodes(list[Node]): The nodes added while visiting the function. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.
def visit_and_get_function_nodes( self, definition, first_node ): len_before_visiting_func = len(self.nodes) previous_node = self.nodes[-1] entry_node = self.append_node(EntryOrExitNode('Function Entry ' + definition.name)) if not first_node: first_node = entry_node self.connect_if_allowed(previous_node, entry_node) function_body_connect_statements = self.stmt_star_handler(definition.node.body) entry_node.connect(function_body_connect_statements.first_statement) exit_node = self.append_node(EntryOrExitNode('Exit ' + definition.name)) exit_node.connect_predecessors(function_body_connect_statements.last_statements) the_new_nodes = self.nodes[len_before_visiting_func:] return_connection_handler(the_new_nodes, exit_node) return (the_new_nodes, first_node)
310,470
Restore the previously saved variables to their original values. Args: saved_variables(list[SavedVariable]) args_mapping(dict): A mapping of call argument to definition argument. line_number(int): Of the def of the function call about to be entered into. Note: We do not need connect_if_allowed because of the preceding call to save_local_scope.
def restore_saved_local_scope( self, saved_variables, args_mapping, line_number ): restore_nodes = list() for var in saved_variables: # Is var.RHS a call argument? if var.RHS in args_mapping: # If so, use the corresponding definition argument for the RHS of the label. restore_nodes.append(RestoreNode( var.RHS + ' = ' + args_mapping[var.RHS], var.RHS, [var.LHS], line_number=line_number, path=self.filenames[-1] )) else: # Create a node for e.g. foo = save_1_foo restore_nodes.append(RestoreNode( var.RHS + ' = ' + var.LHS, var.RHS, [var.LHS], line_number=line_number, path=self.filenames[-1] )) # Chain the restore nodes for node, successor in zip(restore_nodes, restore_nodes[1:]): node.connect(successor) if restore_nodes: # Connect the last node to the first restore node self.nodes[-1].connect(restore_nodes[0]) self.nodes.extend(restore_nodes) return restore_nodes
310,471
Handle the return from a function during a function call. Args: call_node(ast.Call) : The node that calls the definition. function_nodes(list[Node]): List of nodes of the function being called. saved_function_call_index(int): Unique number for each call. first_node(EntryOrExitNode or RestoreNode): Used to connect previous statements to this function.
def return_handler( self, call_node, function_nodes, saved_function_call_index, first_node ): if any(isinstance(node, YieldNode) for node in function_nodes): # Presence of a `YieldNode` means that the function is a generator rhs_prefix = 'yld_' elif any(isinstance(node, ConnectToExitNode) for node in function_nodes): # Only `Return`s and `Raise`s can be of type ConnectToExitNode rhs_prefix = 'ret_' else: return # No return value # Create e.g. ~call_1 = ret_func_foo RestoreNode LHS = CALL_IDENTIFIER + 'call_' + str(saved_function_call_index) RHS = rhs_prefix + get_call_names_as_string(call_node.func) return_node = RestoreNode( LHS + ' = ' + RHS, LHS, [RHS], line_number=call_node.lineno, path=self.filenames[-1] ) return_node.first_node = first_node self.nodes[-1].connect(return_node) self.nodes.append(return_node)
310,472
Prints issues in color-coded text format. Args: vulnerabilities: list of vulnerabilities to report fileobj: The output file object, which may be sys.stdout
def report( vulnerabilities, fileobj, print_sanitised, ): n_vulnerabilities = len(vulnerabilities) unsanitised_vulnerabilities = [v for v in vulnerabilities if not isinstance(v, SanitisedVulnerability)] n_unsanitised = len(unsanitised_vulnerabilities) n_sanitised = n_vulnerabilities - n_unsanitised heading = "{} vulnerabilit{} found{}.\n".format( 'No' if n_unsanitised == 0 else n_unsanitised, 'y' if n_unsanitised == 1 else 'ies', " (plus {} sanitised)".format(n_sanitised) if n_sanitised else "", ) vulnerabilities_to_print = vulnerabilities if print_sanitised else unsanitised_vulnerabilities with fileobj: for i, vulnerability in enumerate(vulnerabilities_to_print, start=1): fileobj.write(vulnerability_to_str(i, vulnerability)) if n_unsanitised == 0: fileobj.write(color(heading, GOOD)) else: fileobj.write(color(heading, DANGER))
310,477
Create a Node that can be used in a CFG. Args: label(str): The label of the node, describing its expression. ast_node: The AST node the Node represents, if any. line_number(Optional[int]): The line of the expression of the Node. path(string): Current filename.
def __init__(self, label, ast_node, *, line_number=None, path): self.label = label self.ast_node = ast_node if line_number: self.line_number = line_number elif ast_node: self.line_number = ast_node.lineno else: self.line_number = None self.path = path self.ingoing = list() self.outgoing = list()
310,505
Create a Restore node. Args: label(str): The label of the node, describing the expression it represents. left_hand_side(str): The variable on the left hand side of the assignment. Used for analysis. right_hand_side_variables(list[str]): A list of variables on the right hand side. line_number(Optional[int]): The line of the expression the Node represents. path(string): Current filename.
def __init__(self, label, left_hand_side, right_hand_side_variables, *, line_number, path): super().__init__(label, left_hand_side, None, right_hand_side_variables, line_number=line_number, path=path)
310,513
Identify sources, sinks and sanitisers in a CFG. Args: cfg(CFG): CFG to find sources, sinks and sanitisers in. sources(tuple): list of sources, a source is a (source, sanitiser) tuple. sinks(tuple): list of sinks, a sink is a (sink, sanitiser) tuple. lattice(Lattice): the lattice we're analysing. nosec_lines(set): lines with # nosec whitelisting Returns: Triggers tuple with sink and source nodes and a sanitiser node dict.
def identify_triggers( cfg, sources, sinks, lattice, nosec_lines ): assignment_nodes = filter_cfg_nodes(cfg, AssignmentNode) tainted_nodes = filter_cfg_nodes(cfg, TaintedNode) tainted_trigger_nodes = [ TriggerNode( Source('Framework function URL parameter'), cfg_node=node ) for node in tainted_nodes ] sources_in_file = find_triggers(assignment_nodes, sources, nosec_lines) sources_in_file.extend(tainted_trigger_nodes) find_secondary_sources(assignment_nodes, sources_in_file, lattice) sinks_in_file = find_triggers(cfg.nodes, sinks, nosec_lines) sanitiser_node_dict = build_sanitiser_node_dict(cfg, sinks_in_file) return Triggers(sources_in_file, sinks_in_file, sanitiser_node_dict)
310,536
Sets the secondary_nodes attribute of each source in the sources list. Args: assignment_nodes([AssignmentNode]) sources([tuple]) lattice(Lattice): the lattice we're analysing.
def find_secondary_sources( assignment_nodes, sources, lattice ): for source in sources: source.secondary_nodes = find_assignments(assignment_nodes, source, lattice)
310,538
Find triggers from the trigger_word_list in the nodes. Args: nodes(list[Node]): the nodes to find triggers in. trigger_word_list(list[Union[Sink, Source]]): list of trigger words to look for. nosec_lines(set): lines with # nosec whitelisting Returns: List of found TriggerNodes
def find_triggers( nodes, trigger_words, nosec_lines ): trigger_nodes = list() for node in nodes: if node.line_number not in nosec_lines: trigger_nodes.extend(iter(label_contains(node, trigger_words))) return trigger_nodes
310,542
Determine if node contains any of the trigger_words provided. Args: node(Node): CFG node to check. trigger_words(list[Union[Sink, Source]]): list of trigger words to look for. Returns: Iterable of TriggerNodes found. Can be multiple because multiple trigger_words can be in one node.
def label_contains( node, triggers ): for trigger in triggers: if trigger.trigger_word in node.label: yield TriggerNode(trigger, node)
310,543
Build a dict of string -> TriggerNode pairs, where the string is the sanitiser and the TriggerNode is a TriggerNode of the sanitiser. Args: cfg(CFG): cfg to traverse. sinks_in_file(list[TriggerNode]): list of TriggerNodes containing the sinks in the file. Returns: A string -> TriggerNode dict.
def build_sanitiser_node_dict( cfg, sinks_in_file ): sanitisers = list() for sink in sinks_in_file: sanitisers.extend(sink.sanitisers) sanitisers_in_file = list() for sanitiser in sanitisers: for cfg_node in cfg.nodes: if sanitiser in cfg_node.label: sanitisers_in_file.append(Sanitiser(sanitiser, cfg_node)) sanitiser_node_dict = dict() for sanitiser in sanitisers: sanitiser_node_dict[sanitiser] = list(find_sanitiser_nodes( sanitiser, sanitisers_in_file )) return sanitiser_node_dict
310,544
Find nodes containing a particular sanitiser. Args: sanitiser(string): sanitiser to look for. sanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser. Returns: Iterable of sanitiser nodes.
def find_sanitiser_nodes( sanitiser, sanitisers_in_file ): for sanitiser_tuple in sanitisers_in_file: if sanitiser == sanitiser_tuple.trigger_word: yield sanitiser_tuple.cfg_node
310,545
Traverses the def-use graph to find all paths from source to sink that cause a vulnerability. Args: current_node() sink() def_use(dict): chain(list(Node)): A path of nodes between source and sink.
def get_vulnerability_chains( current_node, sink, def_use, chain=[] ): for use in def_use[current_node]: if use == sink: yield chain else: vuln_chain = list(chain) vuln_chain.append(use) yield from get_vulnerability_chains( use, sink, def_use, vuln_chain )
310,548
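A stdlib-only sketch of the def-use traversal above, using plain strings in place of CFG nodes; the graph is invented purely for illustration.

def chains(current, sink, def_use, chain=None):
    chain = chain or []
    for use in def_use.get(current, []):
        if use == sink:
            yield chain  # the intermediate nodes between source and sink
        else:
            yield from chains(use, sink, def_use, chain + [use])

def_use = {'source': ['a', 'b'], 'a': ['sink'], 'b': ['c'], 'c': ['sink']}
print(list(chains('source', 'sink', def_use)))  # [['a'], ['b', 'c']]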
Find vulnerabilities in a list of CFGs from a trigger_word_file. Args: cfg_list(list[CFG]): the list of CFGs to scan. blackbox_mapping_file(str) sources_and_sinks_file(str) interactive(bool): determines if we ask the user about blackbox functions not in the mapping file. Returns: A list of vulnerabilities.
def find_vulnerabilities( cfg_list, blackbox_mapping_file, sources_and_sinks_file, interactive=False, nosec_lines=defaultdict(set) ): vulnerabilities = list() definitions = parse(sources_and_sinks_file) with open(blackbox_mapping_file) as infile: blackbox_mapping = json.load(infile) for cfg in cfg_list: find_vulnerabilities_in_cfg( cfg, definitions, Lattice(cfg.nodes), blackbox_mapping, vulnerabilities, interactive, nosec_lines ) if interactive: with open(blackbox_mapping_file, 'w') as outfile: json.dump(blackbox_mapping, outfile, indent=4) return vulnerabilities
310,553
Replace any aliases in label with the fully qualified name. Args: label -- A label : str representing a name (e.g. myos.system) aliases -- A dict of {alias: real_name} (e.g. {'myos': 'os'}) >>> fully_qualify_alias_labels('myos.mycall', {'myos':'os'}) 'os.mycall'
def fully_qualify_alias_labels(label, aliases): for alias, full_name in aliases.items(): if label == alias: return full_name elif label.startswith(alias+'.'): return full_name + label[len(alias):] return label
310,669
Generate an Abstract Syntax Tree using the ast module. Args: path(str): The path to the file e.g. example/foo/bar.py
def generate_ast(path): if os.path.isfile(path): with open(path, 'r') as f: try: tree = ast.parse(f.read()) return PytTransformer().visit(tree) except SyntaxError: # pragma: no cover global recursive if not recursive: _convert_to_3(path) recursive = True return generate_ast(path) else: raise SyntaxError('The ast module can not parse the file' ' and the python 2 to 3 conversion' ' also failed.') raise IOError('Input needs to be a file. Path: ' + path)
310,673
Argument container class. Args: args(ast.arguments): The arguments node of a function AST node.
def __init__(self, args): self.args = args.args self.varargs = args.vararg self.kwarg = args.kwarg self.kwonlyargs = args.kwonlyargs self.defaults = args.defaults self.kw_defaults = args.kw_defaults self.arguments = list() if self.args: self.arguments.extend([x.arg for x in self.args]) if self.varargs: self.arguments.append(self.varargs.arg) if self.kwarg: self.arguments.append(self.kwarg.arg) if self.kwonlyargs: self.arguments.extend([x.arg for x in self.kwonlyargs])
310,675
Prints issues in JSON format. Args: vulnerabilities: list of vulnerabilities to report fileobj: The output file object, which may be sys.stdout
def report( vulnerabilities, fileobj, print_sanitised, ): TZ_AGNOSTIC_FORMAT = "%Y-%m-%dT%H:%M:%SZ" time_string = datetime.utcnow().strftime(TZ_AGNOSTIC_FORMAT) machine_output = { 'generated_at': time_string, 'vulnerabilities': [ vuln.as_dict() for vuln in vulnerabilities if print_sanitised or not isinstance(vuln, SanitisedVulnerability) ] } result = json.dumps( machine_output, indent=4 ) with fileobj: fileobj.write(result)
310,678
Infer a function argument value according to the call context. Arguments: funcnode: The function being called. name: The name of the argument whose value is being inferred. context: Inference context object
def infer_argument(self, funcnode, name, context): if name in self.duplicated_keywords: raise exceptions.InferenceError( "The arguments passed to {func!r} " " have duplicate keywords.", call_site=self, func=funcnode, arg=name, context=context, ) # Look into the keywords first, maybe it's already there. try: return self.keyword_arguments[name].infer(context) except KeyError: pass # Too many arguments given and no variable arguments. if len(self.positional_arguments) > len(funcnode.args.args): if not funcnode.args.vararg: raise exceptions.InferenceError( "Too many positional arguments " "passed to {func!r} that does " "not have *args.", call_site=self, func=funcnode, arg=name, context=context, ) positional = self.positional_arguments[: len(funcnode.args.args)] vararg = self.positional_arguments[len(funcnode.args.args) :] argindex = funcnode.args.find_argname(name)[0] kwonlyargs = {arg.name for arg in funcnode.args.kwonlyargs} kwargs = { key: value for key, value in self.keyword_arguments.items() if key not in kwonlyargs } # If there are too few positionals compared to # what the function expects to receive, check to see # if the missing positional arguments were passed # as keyword arguments and if so, place them into the # positional args list. if len(positional) < len(funcnode.args.args): for func_arg in funcnode.args.args: if func_arg.name in kwargs: arg = kwargs.pop(func_arg.name) positional.append(arg) if argindex is not None: # 2. first argument of instance/class method if argindex == 0 and funcnode.type in ("method", "classmethod"): if context.boundnode is not None: boundnode = context.boundnode else: # XXX can do better ? boundnode = funcnode.parent.frame() if isinstance(boundnode, nodes.ClassDef): # Verify that we're accessing a method # of the metaclass through a class, as in # `cls.metaclass_method`. In this case, the # first argument is always the class. method_scope = funcnode.parent.scope() if method_scope is boundnode.metaclass(): return iter((boundnode,)) if funcnode.type == "method": if not isinstance(boundnode, bases.Instance): boundnode = bases.Instance(boundnode) return iter((boundnode,)) if funcnode.type == "classmethod": return iter((boundnode,)) # if we have a method, extract one position # from the index, so we'll take in account # the extra parameter represented by `self` or `cls` if funcnode.type in ("method", "classmethod"): argindex -= 1 # 2. search arg index try: return self.positional_arguments[argindex].infer(context) except IndexError: pass if funcnode.args.kwarg == name: # It wants all the keywords that were passed into # the call site. if self.has_invalid_keywords(): raise exceptions.InferenceError( "Inference failed to find values for all keyword arguments " "to {func!r}: {unpacked_kwargs!r} doesn't correspond to " "{keyword_arguments!r}.", keyword_arguments=self.keyword_arguments, unpacked_kwargs=self._unpacked_kwargs, call_site=self, func=funcnode, arg=name, context=context, ) kwarg = nodes.Dict( lineno=funcnode.args.lineno, col_offset=funcnode.args.col_offset, parent=funcnode.args, ) kwarg.postinit( [(nodes.const_factory(key), value) for key, value in kwargs.items()] ) return iter((kwarg,)) if funcnode.args.vararg == name: # It wants all the args that were passed into # the call site. 
if self.has_invalid_arguments(): raise exceptions.InferenceError( "Inference failed to find values for all positional " "arguments to {func!r}: {unpacked_args!r} doesn't " "correspond to {positional_arguments!r}.", positional_arguments=self.positional_arguments, unpacked_args=self._unpacked_args, call_site=self, func=funcnode, arg=name, context=context, ) args = nodes.Tuple( lineno=funcnode.args.lineno, col_offset=funcnode.args.col_offset, parent=funcnode.args, ) args.postinit(vararg) return iter((args,)) # Check if it's a default parameter. try: return funcnode.args.default_value(name).infer(context) except exceptions.NoDefault: pass raise exceptions.InferenceError( "No value found for argument {name} to " "{func!r}", call_site=self, func=funcnode, arg=name, context=context, )
310,857
Splits registration ids into several lists of at most 1000 registration ids per list. Args: registration_ids (list): FCM device registration IDs Yields: list: successive chunks of registration ids
def registration_id_chunks(self, registration_ids): try: xrange except NameError: xrange = range # Yield successive 1000-sized (max fcm recipients per request) chunks from registration_ids for i in xrange(0, len(registration_ids), self.FCM_MAX_RECIPIENTS): yield registration_ids[i:i + self.FCM_MAX_RECIPIENTS]
312,132
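A minimal chunking sketch equivalent to the generator above; FCM_MAX_RECIPIENTS is 1000 in the real client, shrunk to 3 here so the output is easy to read.

FCM_MAX_RECIPIENTS = 3  # 1000 in the actual client; reduced for illustration
registration_ids = ['token_%d' % n for n in range(8)]

def chunks(ids, size):
    for i in range(0, len(ids), size):
        yield ids[i:i + size]

print([len(chunk) for chunk in chunks(registration_ids, FCM_MAX_RECIPIENTS)])  # [3, 3, 2]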
Standardized json.dumps function with separators and sorted keys set Args: data (dict or list): data to be dumped Returns: string: json
def json_dumps(self, data): return json.dumps( data, separators=(',', ':'), sort_keys=True, cls=self.json_encoder, ensure_ascii=False ).encode('utf8')
312,133
Makes a request for registration info and returns the response object Args: registration_id: id to be checked Returns: response of registration info request
def registration_info_request(self, registration_id): return self.requests_session.get( self.INFO_END_POINT + registration_id, params={'details': 'true'} )
312,137
Checks registration ids and excludes inactive ids Args: registration_ids (list, optional): list of ids to be cleaned Returns: list: cleaned registration ids
def clean_registration_ids(self, registration_ids=[]): valid_registration_ids = [] for registration_id in registration_ids: details = self.registration_info_request(registration_id) if details.status_code == 200: valid_registration_ids.append(registration_id) return valid_registration_ids
312,138
Returns details related to a registration id if it exists, otherwise returns None. Args: registration_id: id to be checked Returns: dict: info about registration id None: if id doesn't exist
def get_registration_id_info(self, registration_id): response = self.registration_info_request(registration_id) if response.status_code == 200: return response.json() return None
312,139
Subscribes a list of registration ids to a topic. Args: registration_ids (list): ids to be subscribed topic_name (str): name of topic Returns: True: if operation succeeded Raises: InvalidDataError: data sent to server was incorrectly formatted FCMError: an error occurred on the server
def subscribe_registration_ids_to_topic(self, registration_ids, topic_name): url = 'https://iid.googleapis.com/iid/v1:batchAdd' payload = { 'to': '/topics/' + topic_name, 'registration_tokens': registration_ids, } response = self.requests_session.post(url, json=payload) if response.status_code == 200: return True elif response.status_code == 400: error = response.json() raise InvalidDataError(error['error']) else: raise FCMError()
312,140
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment.
def IsErrorSuppressedByNolint(category, linenum): return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
312,371
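A toy illustration of the suppression lookup; None acts as the "all categories" key, and the map contents below are invented.

_error_suppressions = {'whitespace/operators': {12}, None: {40}}  # hypothetical data

def is_suppressed(category, linenum):
    return (linenum in _error_suppressions.get(category, set())
            or linenum in _error_suppressions.get(None, set()))

print(is_suppressed('whitespace/operators', 12))  # True (category-specific NOLINT)
print(is_suppressed('build/c++11', 40))           # True (bare NOLINT on line 40)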
Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckOperatorSpacing(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Don't try to do spacing checks for operator methods. Do this by # replacing the troublesome characters with something else, # preserving column position for all other characters. # # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: break # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line)) and not Search(r'\b(if|while|for) ', line) # Operators taken from [lex.operators] in C++11 standard. and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) and not Search(r'operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') # It's ok not to have spaces around binary operators like + - * /, but if # there's too little whitespace, we get concerned. It's hard to tell, # though, so we punt on this one for now. TODO. # You should always have whitespace around binary operators. # # Check <= and >= first to avoid false positives with < and >, then # check non-include lines for spacing around < and >. # # If the operator is followed by a comma, assume it's be used in a # macro context and don't do any checks. This avoids false # positives. # # Note that && is not included here. Those are checked separately # in CheckRValueReference match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1)) elif not Match(r'#.*include', line): # Look for < that is not surrounded by spaces. This is only # triggered if both sides are missing spaces, even though # technically should should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. match = Match(r'^(.*[^\s<])<[^\s=<,]', line) if match: (_, _, end_pos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if end_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <') # Look for > that is not surrounded by spaces. Similar to the # above, we only trigger if both sides are missing spaces to avoid # false positives with shifts. match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression( clean_lines, linenum, len(match.group(1))) if start_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >') # We allow no-spaces around << when used like this: 10<<20, but # not otherwise (particularly, not when used as streams) # # We also allow operators following an opening parenthesis, since # those tend to be macros that deal with operators. match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line) if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<') # We allow no-spaces around >> for almost anything. 
This is because # C++11 allows ">>" to close nested templates, which accounts for # most cases when ">>" is not followed by a space. # # We still warn on ">>" followed by alpha character, because that is # likely due to ">>" being used for right shifts, e.g.: # value >> alpha # # When ">>" is used to close templates, the alphanumeric letter that # follows would be part of an identifier, and there should still be # a space separating the template type and the identifier. # type<type<type>> alpha match = Search(r'>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') # There shouldn't be space around unary operators match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1))
312,376
Check if the token ending on (linenum, column) is the end of template<>. Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is end of a template parameter list, False otherwise.
def IsTemplateParameterList(clean_lines, linenum, column): (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, column) if (startpos > -1 and Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])): return True return False
312,377
Check if current constructor or operator is deleted or default. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if this is a deleted or default constructor.
def IsDeletedOrDefault(clean_lines, linenum): open_paren = clean_lines.elided[linenum].find('(') if open_paren < 0: return False (close_line, _, close_paren) = CloseExpression( clean_lines, linenum, open_paren) if close_paren < 0: return False return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
312,379
Check if RValue reference is allowed on a particular line. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. typenames: set of type names from template-argument-list. Returns: True if line is within the region where RValue references are allowed.
def IsRValueAllowed(clean_lines, linenum, typenames): # Allow region marked by PUSH/POP macros for i in xrange(linenum, 0, -1): line = clean_lines.elided[i] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): if not line.endswith('PUSH'): return False for j in xrange(linenum, clean_lines.NumLines(), 1): line = clean_lines.elided[j] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): return line.endswith('POP') # Allow operator= line = clean_lines.elided[linenum] if Search(r'\boperator\s*=\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) # Allow constructors match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) if match and match.group(1) == match.group(2): return IsDeletedOrDefault(clean_lines, linenum) if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) if Match(r'\s*[\w<>]+\s*\(', line): previous_line = 'ReturnType' if linenum > 0: previous_line = clean_lines.elided[linenum - 1] if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): return IsDeletedOrDefault(clean_lines, linenum) # Reject types not mentioned in template-argument-list while line: match = Match(r'^.*?(\w+)\s*&&(.*)$', line) if not match: break if match.group(1) not in typenames: return False line = match.group(2) # All RValue types that were in template-argument-list should have # been removed by now. Those were allowed, assuming that they will # be forwarded. # # If there are no remaining RValue types left (i.e. types that were # not found in template-argument-list), flag those as not allowed. return line.find('&&') < 0
312,380
Find list of template arguments associated with this function declaration. Args: clean_lines: A CleansedLines instance containing the file. linenum: Line number containing the start of the function declaration, usually one line after the end of the template-argument-list. Returns: Set of type names, or empty set if this does not appear to have any template parameters.
def GetTemplateArgs(clean_lines, linenum): # Find start of function func_line = linenum while func_line > 0: line = clean_lines.elided[func_line] if Match(r'^\s*$', line): return set() if line.find('(') >= 0: break func_line -= 1 if func_line == 0: return set() # Collapse template-argument-list into a single string argument_list = '' match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line]) if match: # template-argument-list on the same line as function name start_col = len(match.group(1)) _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col) if end_col > -1 and end_line == func_line: start_col += 1 # Skip the opening bracket argument_list = clean_lines.elided[func_line][start_col:end_col] elif func_line > 1: # template-argument-list one line before function name match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1]) if match: end_col = len(match.group(1)) _, start_line, start_col = ReverseCloseExpression( clean_lines, func_line - 1, end_col) if start_col > -1: start_col += 1 # Skip the opening bracket while start_line < func_line - 1: argument_list += clean_lines.elided[start_line][start_col:] start_col = 0 start_line += 1 argument_list += clean_lines.elided[func_line - 1][start_col:end_col] if not argument_list: return set() # Extract type names typenames = set() while True: match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$', argument_list) if not match: break typenames.add(match.group(1)) argument_list = match.group(2) return typenames
312,381
Check for rvalue references. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): # Find lines missing spaces around &&. # TODO(unknown): currently we don't check for rvalue references # with spaces surrounding the && to avoid false positives with # boolean expressions. line = clean_lines.elided[linenum] match = Match(r'^(.*\S)&&', line) if not match: match = Match(r'(.*)&&\S', line) if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): return # Either poorly formed && or an rvalue reference, check the context # to get a more accurate error message. Mostly we want to determine # if what's to the left of "&&" is a type or not. typenames = GetTemplateArgs(clean_lines, linenum) and_pos = len(match.group(1)) if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): if not IsRValueAllowed(clean_lines, linenum, typenames): error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.') else: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&')
312,382
Looks for misplaced braces (e.g. at the end of line). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckBraces(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # get rid of comments and strings if Match(r'\s*{\s*$', line): # We allow an open brace to start a line in the case where someone is using # braces in a block to explicitly create a new scope, which is commonly used # to control the lifetime of stack-allocated variables. Braces are also # used for brace initializers inside function calls. We don't detect this # perfectly: we just don't complain if the last non-whitespace character on # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the # previous line starts a preprocessor block. prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if (not Search(r'[,;:}{(]\s*$', prevline) and not Match(r'\s*#', prevline)): error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line') # An else clause should be on the same line as the preceding closing brace. if Match(r'\s*else\b\s*(?:if\b|\{|$)', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if Match(r'\s*}\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') # If braces come on one side of an else, they should be on both. # However, we have to worry about "else if" that spans multiple lines! if Search(r'else if\s*\(', line): # could be multi-line if brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) # find the ( after the if pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) brace_on_right = endline[endpos:].find('{') != -1 if brace_on_left != brace_on_right: # must be brace after if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') # Likewise, an else should never have the else clause on the same line if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') # In the same way, a do/while should never be on one line if Match(r'\s*do [^\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') # Check single-line if/else bodies. The style guide says 'curly braces are not # required for single-line statements'. We additionally allow multi-line, # single statements, but we reject anything with more than one semicolon in # it. This means that the first semicolon after the if should be at the end of # its line, and the line after that should have an indent level equal to or # lower than the if. We also check for ambiguous if/else nesting without # braces. if_else_match = Search(r'\b(if\s*\(|else\b)', line) if if_else_match and not Match(r'\s*#', line): if_indent = GetIndentLevel(line) endline, endlinenum, endpos = line, linenum, if_else_match.end() if_match = Search(r'\bif\s*\(', line) if if_match: # This could be a multiline if condition, so find the end first. pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) # Check for an opening brace, either directly after the if or on the next # line. If found, this isn't a single-statement conditional. 
if (not Match(r'\s*{', endline[endpos:]) and not (Match(r'\s*$', endline[endpos:]) and endlinenum < (len(clean_lines.elided) - 1) and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): while (endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]): endlinenum += 1 endpos = 0 if endlinenum < len(clean_lines.elided): endline = clean_lines.elided[endlinenum] # We allow a mix of whitespace and closing braces (e.g. for one-liner # methods) and a single \ after the semicolon (for macros) endpos = endline.find(';') if not Match(r';[\s}]*(\\?)$', endline[endpos:]): # Semicolon isn't the last character, there's something trailing. # Output a warning if the semicolon is not contained inside # a lambda expression. if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') elif endlinenum < len(clean_lines.elided) - 1: # Make sure the next line is dedented next_line = clean_lines.elided[endlinenum + 1] next_indent = GetIndentLevel(next_line) # With ambiguous nested if statements, this will error out on the # if that *doesn't* match the else, regardless of whether it's the # inner one or outer one. if (if_match and Match(r'\s*else\b', next_line) and next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. ' 'Ambiguous nested if/else chains require braces.') elif next_indent > if_indent: error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces')
312,383
Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  # Search for loop keywords at the beginning of the line. Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # We also check "if" blocks here, since an empty conditional block
  # is likely an error.
  line = clean_lines.elided[linenum]
  matched = Match(r'\s*(for|while|if)\s*\(', line)
  if matched:
    # Find the end of the conditional expression
    (end_line, end_linenum, end_pos) = CloseExpression(
        clean_lines, linenum, line.find('('))

    # Output warning if what follows the condition expression is a semicolon.
    # No warning for all other cases, including whitespace or newline, since we
    # have a separate check for semicolons preceded by whitespace.
    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
      if matched.group(1) == 'if':
        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
              'Empty conditional bodies should use {}')
      else:
        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
              'Empty loop bodies should use {} or continue')
312,384
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
def _DropCommonSuffixes(filename):
  for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
                 'inl.h', 'impl.h', 'internal.h'):
    if (filename.endswith(suffix) and len(filename) > len(suffix) and
        filename[-len(suffix) - 1] in ('-', '_')):
      return filename[:-len(suffix) - 1]
  return os.path.splitext(filename)[0]
312,386
Determines if the given filename has a suffix that identifies it as a test. Args: filename: The input filename. Returns: True if 'filename' looks like a test, False otherwise.
def _IsTestFilename(filename):
  if (filename.endswith('_test.cc') or
      filename.endswith('_unittest.cc') or
      filename.endswith('_regtest.cc')):
    return True
  else:
    return False
312,387
Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckGlobalStatic(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access. match = Match( r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: # - String pointers (as opposed to values). # string *pointer # const string *pointer # string const *pointer # string *const pointer # # - Functions and template specializations. # string Function<Type>(... # string Class<Type>::Method(... # # - Operators. These are matched separately because operator names # cross non-word boundaries, and trying to match both operators # and functions at the same time would decrease accuracy of # matching identifiers. # string Class::operator*() if (match and not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' % (match.group(1), match.group(2))) if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.')
312,388
Check that default lambda captures are not used. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  line = clean_lines.elided[linenum]

  # A lambda introducer specifies a default capture if it starts with "[="
  # or if it starts with "[&" _not_ followed by an identifier.
  match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
  if match:
    # Found a potential error, check what comes after the lambda-introducer.
    # If it's not open parenthesis (for lambda-declarator) or open brace
    # (for compound-statement), it's not a lambda.
    line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
    if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
      error(filename, linenum, 'build/c++11',
            4,  # 4 = high confidence
            'Default lambda captures are an unapproved C++ feature.')
312,392
Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def FlagCxx11Features(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Flag unapproved C++11 headers. include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) if include and include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error', ): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.') % include.group(1)) # The only place where we need to worry about C++11 keywords and library # features in preprocessor directives is in macro definitions. if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return # These are classes and free functions. The classes are always # mentioned as std::*, but we only catch the free functions if # they're not found by ADL. They're alphabetical by header. for top_name in ( # type_traits 'alignment_of', 'aligned_union', ): if Search(r'\bstd::%s\b' % top_name, line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style ' 'an example of where it would make your code more readable, and ' 'they may let you use it.') % top_name)
312,394
Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further.
def ProcessConfigOverrides(filename): abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: abs_path, base_name = os.path.split(abs_filename) if not base_name: break # Reached the root directory. cfg_file = os.path.join(abs_path, "CPPLINT.cfg") abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: with open(cfg_file) as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): continue name, _, val = line.partition('=') name = name.strip() val = val.strip() if name == 'set noparent': keep_looking = False elif name == 'filter': cfg_filters.append(val) elif name == 'exclude_files': # When matching exclude_files pattern, use the base_name of # the current file name or the directory name we are processing. # For example, if we are checking for lint errors in /foo/bar/baz.cc # and we found the .cfg file at /foo/CPPLINT.cfg, then the config # file's "exclude_files" filter is meant to be checked against "bar" # and not "baz" nor "bar/baz.cc". if base_name: pattern = re.compile(val) if pattern.match(base_name): sys.stderr.write('Ignoring "%s": file excluded by "%s". ' 'File path component "%s" matches ' 'pattern "%s"\n' % (filename, cfg_file, base_name, val)) return False elif name == 'linelength': global _line_length try: _line_length = int(val) except ValueError: sys.stderr.write('Line length must be numeric.') else: sys.stderr.write( 'Invalid configuration option (%s) in file %s\n' % (name, cfg_file)) except IOError: sys.stderr.write( "Skipping config file '%s': Can't open for reading\n" % cfg_file) keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). for filter in reversed(cfg_filters): _AddFilters(filter) return True
312,395
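For illustration, a CPPLINT.cfg consumed by the parser above could look like the following sketch. The option names (set noparent, filter, exclude_files, linelength) come from the code; the specific filter categories and the exclude pattern are hypothetical examples, and inline comments are legal because everything after '#' is stripped before parsing.

# CPPLINT.cfg (illustrative sketch)
set noparent                              # stop looking for config files in parent directories
filter=-whitespace/braces,+build/c++11    # hypothetical filter list, forwarded to _AddFilters
exclude_files=.*_generated\.cc            # hypothetical regex, matched against one path component
linelength=100                            # overrides the global _line_length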
Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments. Returns: The list of filenames to lint.
def ParseArguments(args):
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        PrintUsage('Extensions must be comma separated list.')

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
312,396
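As a sketch of how these flags are consumed, the call below would be made from within cpplint's module context; the flag values and file names are placeholders, and the category names in the filter string appear in the checks earlier in this file.

# illustrative call; flag values and file names are placeholders
filenames = ParseArguments(['--output=emacs', '--verbose=3',
                            '--counting=toplevel',
                            '--filter=-whitespace/braces,+build/c++11',
                            '--linelength=100', '--extensions=cc,h',
                            'src/foo.cc', 'include/foo.h'])
# returns ['src/foo.cc', 'include/foo.h']; output format, verbosity, filters
# and counting style are applied as side effects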
Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check.
def Check(self, error, filename, linenum):
  if Match(r'T(EST|est)', self.current_function):
    base_trigger = self._TEST_TRIGGER
  else:
    base_trigger = self._NORMAL_TRIGGER
  trigger = base_trigger * 2**_VerboseLevel()

  if self.lines_in_function > trigger:
    error_level = int(math.log(self.lines_in_function / base_trigger, 2))
    # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
    if error_level > 5:
      error_level = 5
    error(filename, linenum, 'readability/fn_size', error_level,
          'Small and focused functions are preferred:'
          ' %s has %d non-comment lines'
          ' (error triggered by exceeding %d lines).' % (
              self.current_function, self.lines_in_function, trigger))
312,399
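To make the trigger arithmetic concrete, a small sketch; the base value of 250 is hypothetical, standing in for the _NORMAL_TRIGGER constant defined elsewhere in cpplint.

# illustrative only: the allowed function size doubles with each verbosity level
base_trigger = 250  # hypothetical stand-in for _NORMAL_TRIGGER
for verbose_level in range(4):
    print(verbose_level, base_trigger * 2 ** verbose_level)
# prints: 0 250, 1 500, 2 1000, 3 2000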
Tree Render Style. Args: vertical: Character(s) used for the vertical line. cont: Character(s) used for a continued branch. end: Character(s) used for the last branch.
def __init__(self, vertical, cont, end):
    super(AbstractStyle, self).__init__()
    self.vertical = vertical
    self.cont = cont
    self.end = end
    assert (len(cont) == len(vertical) and len(cont) == len(end)), (
        "'%s', '%s' and '%s' need to have equal length" % (vertical, cont, end))
312,694
This call returns an array of symbols that IEX Cloud supports for API calls. https://iexcloud.io/docs/api/#symbols 8am, 9am, 12pm, 1pm UTC daily Args: token (string); Access token version (string); API version Returns: DataFrame: result
def symbolsDF(token='', version=''):
    df = pd.DataFrame(symbols(token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
312,711
This call returns an array of symbols the Investors Exchange supports for trading. This list is updated daily as of 7:45 a.m. ET. Symbols may be added or removed by the Investors Exchange after the list was produced. https://iexcloud.io/docs/api/#iex-symbols 8am, 9am, 12pm, 1pm UTC daily Args: token (string); Access token version (string); API version Returns: DataFrame: result
def iexSymbolsDF(token='', version=''):
    df = pd.DataFrame(iexSymbols(token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
312,712
This call returns an array of mutual fund symbols that IEX Cloud supports for API calls. https://iexcloud.io/docs/api/#mutual-fund-symbols 8am, 9am, 12pm, 1pm UTC daily Args: token (string); Access token version (string); API version Returns: DataFrame: result
def mutualFundSymbolsDF(token='', version=''):
    df = pd.DataFrame(mutualFundSymbols(token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
312,713
This call returns an array of OTC symbols that IEX Cloud supports for API calls. https://iexcloud.io/docs/api/#otc-symbols 8am, 9am, 12pm, 1pm UTC daily Args: token (string); Access token version (string); API version Returns: DataFrame: result
def otcSymbolsDF(token='', version=''):
    df = pd.DataFrame(otcSymbols(token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
312,714
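The four symbol-list helpers above share the same shape; a minimal usage sketch, assuming they are exported at the pyEX package level and using a placeholder token.

import pyEX

TOKEN = 'pk_TEST_TOKEN'  # placeholder; use a real IEX Cloud token
all_syms = pyEX.symbolsDF(token=TOKEN)
otc_syms = pyEX.otcSymbolsDF(token=TOKEN)
# each frame is re-indexed by 'symbol'
print(all_syms.shape, otc_syms.shape)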
Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years) https://iexcloud.io/docs/api/#balance-sheet Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
def balanceSheet(symbol, token='', version=''):
    _raiseIfNotStr(symbol)
    return _getJson('stock/' + symbol + '/balance-sheet', token, version)
312,739
Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years) https://iexcloud.io/docs/api/#balance-sheet Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
def balanceSheetDF(symbol, token='', version=''):
    val = balanceSheet(symbol, token, version)
    df = pd.io.json.json_normalize(val, 'balancesheet', 'symbol')
    _toDatetime(df)
    _reindex(df, 'reportDate')
    return df
312,740
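A short usage sketch for the balance-sheet pair above; the ticker and token are placeholders and the selected columns assume the standard IEX balance-sheet field names.

import pyEX

raw = pyEX.balanceSheet('AAPL', token='pk_TEST_TOKEN')    # dict with 'symbol' and 'balancesheet' keys
df = pyEX.balanceSheetDF('AAPL', token='pk_TEST_TOKEN')   # same data, indexed by reportDate
print(df[['totalAssets', 'totalLiabilities']].head())     # assumed IEX field names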
Batch several data requests into one invocation https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: dict: results in json
def batch(symbols, fields=None, range_='1m', last=10, token='', version=''):
    fields = fields or _BATCH_TYPES[:10]  # limit 10

    if not isinstance(symbols, [].__class__):
        if not isinstance(symbols, str):
            raise PyEXception('batch expects string or list of strings for symbols argument')

    if isinstance(fields, str):
        fields = [fields]

    if range_ not in _TIMEFRAME_CHART:
        raise PyEXception('Range must be in %s' % str(_TIMEFRAME_CHART))

    if isinstance(symbols, str):
        route = 'stock/{}/batch?types={}&range={}&last={}'.format(
            symbols, ','.join(fields), range_, last)
        return _getJson(route, token, version)

    if len(symbols) > 100:
        raise PyEXception('IEX will only handle up to 100 symbols at a time!')
    route = 'stock/market/batch?symbols={}&types={}&range={}&last={}'.format(
        ','.join(symbols), ','.join(fields), range_, last)
    return _getJson(route, token, version)
312,741
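A sketch of the two call forms batch supports, a single symbol versus a list; the tickers and token are placeholders, and 'quote', 'news' and 'chart' are assumed to be valid entries of _BATCH_TYPES.

import pyEX

# single symbol: one dict with a key per requested type
one = pyEX.batch('AAPL', fields=['quote', 'chart'], range_='1m', token='pk_TEST_TOKEN')

# list of symbols (at most 100): a dict keyed by symbol
many = pyEX.batch(['AAPL', 'MSFT'], fields=['quote', 'news'], last=5, token='pk_TEST_TOKEN')
print(many['AAPL']['quote'])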
Batch several data requests into one invocation https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: DataFrame: results in json
def batchDF(symbols, fields=None, range_='1m', last=10, token='', version=''):
    x = batch(symbols, fields, range_, last, token, version)

    ret = {}

    if isinstance(symbols, str):
        for field in x.keys():
            ret[field] = _MAPPING[field](x[field])
    else:
        for symbol in x.keys():
            for field in x[symbol].keys():
                if field not in ret:
                    ret[field] = pd.DataFrame()
                dat = x[symbol][field]
                dat = _MAPPING[field](dat)
                dat['symbol'] = symbol
                ret[field] = pd.concat([ret[field], dat], sort=True)
    return ret
312,742
Optimized batch to fetch as much as possible at once https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: dict: results in json
def bulkBatch(symbols, fields=None, range_='1m', last=10, token='', version=''):
    fields = fields or _BATCH_TYPES
    args = []
    empty_data = []
    list_orig = empty_data.__class__

    if not isinstance(symbols, list_orig):
        raise PyEXception('Symbols must be of type list')

    for i in range(0, len(symbols), 99):
        args.append((symbols[i:i+99], fields, range_, last, token, version))

    pool = ThreadPool(20)
    rets = pool.starmap(batch, args)
    pool.close()

    ret = {}

    for i, d in enumerate(rets):
        symbols_subset = args[i][0]
        if len(d) != len(symbols_subset):
            empty_data.extend(list_orig(set(symbols_subset) - set(d.keys())))
        ret.update(d)

    for k in empty_data:
        if k not in ret:
            if isinstance(fields, str):
                ret[k] = {}
            else:
                ret[k] = {x: {} for x in fields}
    return ret
312,743
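bulkBatch above splits the symbol list into chunks of 99 and fans the batch calls out over a thread pool; a usage sketch with a hypothetical symbol list and placeholder token.

import pyEX

tickers = ['AAPL', 'MSFT', 'IBM']  # in practice this list can be far longer than 100
data = pyEX.bulkBatch(tickers, fields=['quote'], token='pk_TEST_TOKEN')
# symbols that returned nothing are filled with empty placeholders per field
print(sorted(data.keys()))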
Optimized batch to fetch as much as possible at once https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: DataFrame: results in json
def bulkBatchDF(symbols, fields=None, range_='1m', last=10, token='', version=''):
    dat = bulkBatch(symbols, fields, range_, last, token, version)
    ret = {}
    for symbol in dat:
        for field in dat[symbol]:
            if field not in ret:
                ret[field] = pd.DataFrame()
            d = dat[symbol][field]
            d = _MAPPING[field](d)
            d['symbol'] = symbol
            ret[field] = pd.concat([ret[field], d], sort=True)
    return ret
312,744
Book data https://iextrading.com/developer/docs/#book realtime during Investors Exchange market hours Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
def bookDF(symbol, token='', version=''):
    x = book(symbol, token, version)
    df = _bookToDF(x)
    return df
312,746
Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years). https://iexcloud.io/docs/api/#cash-flow Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
def cashFlow(symbol, token='', version=''):
    _raiseIfNotStr(symbol)
    return _getJson('stock/' + symbol + '/cash-flow', token, version)
312,747
Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years). https://iexcloud.io/docs/api/#cash-flow Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
def cashFlowDF(symbol, token='', version=''):
    val = cashFlow(symbol, token, version)
    df = pd.io.json.json_normalize(val, 'cashflow', 'symbol')
    _toDatetime(df)
    _reindex(df, 'reportDate')
    df.replace(to_replace=[None], value=np.nan, inplace=True)
    return df
312,748
Returns an array of quote objects for a given collection type. Currently supported collection types are sector, tag, and list https://iexcloud.io/docs/api/#collections Args: tag (string); Sector, Tag, or List collectionName (string); Associated name for tag token (string); Access token version (string); API version Returns: dict: result
def collections(tag, collectionName, token='', version=''):
    if tag not in _COLLECTION_TAGS:
        raise PyEXception('Tag must be in %s' % str(_COLLECTION_TAGS))
    return _getJson('stock/market/collection/' + tag + '?collectionName=' + collectionName, token, version)
312,754
Returns an array of quote objects for a given collection type. Currently supported collection types are sector, tag, and list https://iexcloud.io/docs/api/#collections Args: tag (string); Sector, Tag, or List collectionName (string); Associated name for tag token (string); Access token version (string); API version Returns: DataFrame: result
def collectionsDF(tag, query, token='', version=''):
    df = pd.DataFrame(collections(tag, query, token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
312,755
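A usage sketch for the collection pair above; 'sector' is one of the documented tags, while 'Technology' is a hypothetical collectionName and the token is a placeholder.

import pyEX

tech = pyEX.collections('sector', 'Technology', token='pk_TEST_TOKEN')       # list of quote dicts
tech_df = pyEX.collectionsDF('sector', 'Technology', token='pk_TEST_TOKEN')  # same, indexed by symbol
print(tech_df.head())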
Company reference data https://iexcloud.io/docs/api/#company Updates at 4am and 5am UTC every day Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
def company(symbol, token='', version=''):
    _raiseIfNotStr(symbol)
    return _getJson('stock/' + symbol + '/company', token, version)
312,756
Company reference data https://iexcloud.io/docs/api/#company Updates at 4am and 5am UTC every day Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
def companyDF(symbol, token='', version=''):
    c = company(symbol, token, version)
    df = _companyToDF(c)
    return df
312,758
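A usage sketch for the company pair above; the ticker and token are placeholders and the selected columns assume the standard IEX company fields.

import pyEX

info = pyEX.companyDF('AAPL', token='pk_TEST_TOKEN')
print(info.filter(['companyName', 'exchange', 'industry']))  # assumed IEX field names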
This returns the 15 minute delayed market quote. https://iexcloud.io/docs/api/#delayed-quote 15min delayed 4:30am - 8pm ET M-F when market is open Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
def delayedQuote(symbol, token='', version=''):
    _raiseIfNotStr(symbol)
    return _getJson('stock/' + symbol + '/delayed-quote', token, version)
312,759
This returns the 15 minute delayed market quote. https://iexcloud.io/docs/api/#delayed-quote 15min delayed 4:30am - 8pm ET M-F when market is open Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
def delayedQuoteDF(symbol, token='', version=''):
    df = pd.io.json.json_normalize(delayedQuote(symbol, token, version))
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
312,760
Dividend history https://iexcloud.io/docs/api/#dividends Updated at 9am UTC every day Args: symbol (string); Ticker to request timeframe (string); timeframe for data, must be in _TIMEFRAME_DIVSPLIT token (string); Access token version (string); API version Returns: dict: result
def dividends(symbol, timeframe='ytd', token='', version=''):
    _raiseIfNotStr(symbol)
    if timeframe not in _TIMEFRAME_DIVSPLIT:
        raise PyEXception('Range must be in %s' % str(_TIMEFRAME_DIVSPLIT))
    return _getJson('stock/' + symbol + '/dividends/' + timeframe, token, version)
312,761
Dividend history https://iexcloud.io/docs/api/#dividends Updated at 9am UTC every day Args: symbol (string); Ticker to request timeframe (string); timeframe for data, must be in _TIMEFRAME_DIVSPLIT token (string); Access token version (string); API version Returns: DataFrame: result
def dividendsDF(symbol, timeframe='ytd', token='', version=''):
    d = dividends(symbol, timeframe, token, version)
    df = _dividendsToDF(d)
    return df
312,763
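A usage sketch for the dividend pair above; '5y' is assumed to be one of the timeframes in _TIMEFRAME_DIVSPLIT, the ticker and token are placeholders, and the selected columns assume the standard IEX dividend fields.

import pyEX

divs = pyEX.dividendsDF('AAPL', timeframe='5y', token='pk_TEST_TOKEN')
print(divs[['amount', 'paymentDate']].head())  # assumed IEX field names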