id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
226,700
yougov/mongo-connector
mongo_connector/oplog_manager.py
OplogThread.get_collection
def get_collection(self, namespace):
    """Look up the pymongo collection addressed by a 'db.collection' namespace."""
    # Only the first dot separates database from collection; collection
    # names may themselves contain dots.
    db_name, coll_name = namespace.split(".", 1)
    return self.primary_client[db_name][coll_name]
python
def get_collection(self, namespace): database, coll = namespace.split(".", 1) return self.primary_client[database][coll]
[ "def", "get_collection", "(", "self", ",", "namespace", ")", ":", "database", ",", "coll", "=", "namespace", ".", "split", "(", "\".\"", ",", "1", ")", "return", "self", ".", "primary_client", "[", "database", "]", "[", "coll", "]" ]
Get a pymongo collection from a namespace.
[ "Get", "a", "pymongo", "collection", "from", "a", "namespace", "." ]
557cafd4b54c848cd54ef28a258391a154650cb4
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L516-L519
226,701
yougov/mongo-connector
mongo_connector/oplog_manager.py
OplogThread._get_oplog_timestamp
def _get_oplog_timestamp(self, newest_entry):
    """Return the timestamp of the newest (or oldest) real oplog entry.

    Returns None when the oplog contains no entries other than no-ops.
    """
    # Newest entry -> scan descending; oldest -> ascending. limit(-1)
    # caps the result at a single document.
    if newest_entry:
        direction = pymongo.DESCENDING
    else:
        direction = pymongo.ASCENDING
    cursor = (
        self.oplog.find({"op": {"$ne": "n"}}).sort("$natural", direction).limit(-1)
    )
    try:
        ts = next(cursor)["ts"]
    except StopIteration:
        LOG.debug("OplogThread: oplog is empty.")
        return None
    LOG.debug(
        "OplogThread: %s oplog entry has timestamp %s."
        % ("Newest" if newest_entry else "Oldest", ts)
    )
    return ts
python
def _get_oplog_timestamp(self, newest_entry): sort_order = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING curr = ( self.oplog.find({"op": {"$ne": "n"}}).sort("$natural", sort_order).limit(-1) ) try: ts = next(curr)["ts"] except StopIteration: LOG.debug("OplogThread: oplog is empty.") return None LOG.debug( "OplogThread: %s oplog entry has timestamp %s." % ("Newest" if newest_entry else "Oldest", ts) ) return ts
[ "def", "_get_oplog_timestamp", "(", "self", ",", "newest_entry", ")", ":", "sort_order", "=", "pymongo", ".", "DESCENDING", "if", "newest_entry", "else", "pymongo", ".", "ASCENDING", "curr", "=", "(", "self", ".", "oplog", ".", "find", "(", "{", "\"op\"", ...
Return the timestamp of the latest or earliest entry in the oplog.
[ "Return", "the", "timestamp", "of", "the", "latest", "or", "earliest", "entry", "in", "the", "oplog", "." ]
557cafd4b54c848cd54ef28a258391a154650cb4
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L736-L754
226,702
yougov/mongo-connector
mongo_connector/oplog_manager.py
OplogThread.init_cursor
def init_cursor(self):
    """Position the cursor appropriately.

    The cursor is set to either the beginning of the oplog, or
    wherever it was last left off.

    Returns:
        (cursor, bool): the oplog cursor (or None) and True if the
        cursor is empty.
    """
    timestamp = self.read_last_checkpoint()
    # No saved checkpoint (first run) or a dump-only run: start fresh.
    if timestamp is None or self.only_dump:
        if self.collection_dump:
            # dump collection and update checkpoint
            timestamp = self.dump_collection()
            if self.only_dump:
                LOG.info("Finished dump. Exiting.")
                timestamp = None
                self.running = False
            self.update_checkpoint(timestamp)
            if timestamp is None:
                # dump produced no timestamp (or dump-only run finished)
                return None, True
        else:
            # Collection dump disabled:
            # Return cursor to beginning of oplog but do not set the
            # checkpoint. The checkpoint will be set after an operation
            # has been applied.
            cursor = self.get_oplog_cursor()
            return cursor, self._cursor_empty(cursor)
    cursor = self.get_oplog_cursor(timestamp)
    cursor_empty = self._cursor_empty(cursor)
    if cursor_empty:
        # rollback, update checkpoint, and retry
        LOG.debug("OplogThread: Initiating rollback from " "get_oplog_cursor")
        self.update_checkpoint(self.rollback())
        return self.init_cursor()
    first_oplog_entry = next(cursor)
    oldest_ts_long = util.bson_ts_to_long(self.get_oldest_oplog_timestamp())
    checkpoint_ts_long = util.bson_ts_to_long(timestamp)
    if checkpoint_ts_long < oldest_ts_long:
        # We've fallen behind, the checkpoint has fallen off the oplog
        return None, True
    cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
    if cursor_ts_long > checkpoint_ts_long:
        # The checkpoint is not present in this oplog and the oplog
        # did not rollover. This means that we connected to a new
        # primary which did not replicate the checkpoint and which has
        # new changes in its oplog for us to process.
        # rollback, update checkpoint, and retry
        LOG.debug(
            "OplogThread: Initiating rollback from "
            "get_oplog_cursor: new oplog entries found but "
            "checkpoint is not present"
        )
        self.update_checkpoint(self.rollback())
        return self.init_cursor()
    # first entry has been consumed
    return cursor, cursor_empty
python
def init_cursor(self): timestamp = self.read_last_checkpoint() if timestamp is None or self.only_dump: if self.collection_dump: # dump collection and update checkpoint timestamp = self.dump_collection() if self.only_dump: LOG.info("Finished dump. Exiting.") timestamp = None self.running = False self.update_checkpoint(timestamp) if timestamp is None: return None, True else: # Collection dump disabled: # Return cursor to beginning of oplog but do not set the # checkpoint. The checkpoint will be set after an operation # has been applied. cursor = self.get_oplog_cursor() return cursor, self._cursor_empty(cursor) cursor = self.get_oplog_cursor(timestamp) cursor_empty = self._cursor_empty(cursor) if cursor_empty: # rollback, update checkpoint, and retry LOG.debug("OplogThread: Initiating rollback from " "get_oplog_cursor") self.update_checkpoint(self.rollback()) return self.init_cursor() first_oplog_entry = next(cursor) oldest_ts_long = util.bson_ts_to_long(self.get_oldest_oplog_timestamp()) checkpoint_ts_long = util.bson_ts_to_long(timestamp) if checkpoint_ts_long < oldest_ts_long: # We've fallen behind, the checkpoint has fallen off the oplog return None, True cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"]) if cursor_ts_long > checkpoint_ts_long: # The checkpoint is not present in this oplog and the oplog # did not rollover. This means that we connected to a new # primary which did not replicate the checkpoint and which has # new changes in its oplog for us to process. # rollback, update checkpoint, and retry LOG.debug( "OplogThread: Initiating rollback from " "get_oplog_cursor: new oplog entries found but " "checkpoint is not present" ) self.update_checkpoint(self.rollback()) return self.init_cursor() # first entry has been consumed return cursor, cursor_empty
[ "def", "init_cursor", "(", "self", ")", ":", "timestamp", "=", "self", ".", "read_last_checkpoint", "(", ")", "if", "timestamp", "is", "None", "or", "self", ".", "only_dump", ":", "if", "self", ".", "collection_dump", ":", "# dump collection and update checkpoin...
Position the cursor appropriately. The cursor is set to either the beginning of the oplog, or wherever it was last left off. Returns the cursor and True if the cursor is empty.
[ "Position", "the", "cursor", "appropriately", "." ]
557cafd4b54c848cd54ef28a258391a154650cb4
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L774-L837
226,703
yougov/mongo-connector
mongo_connector/oplog_manager.py
OplogThread.update_checkpoint
def update_checkpoint(self, checkpoint):
    """Persist ``checkpoint`` into the shared oplog progress dictionary.

    A None checkpoint, or one equal to the current value, is a no-op.
    """
    if checkpoint is None or checkpoint == self.checkpoint:
        LOG.debug("OplogThread: no checkpoint to update.")
        return
    self.checkpoint = checkpoint
    with self.oplog_progress as oplog_prog:
        oplog_dict = oplog_prog.get_dict()
        # mongo-connector <= 2.3 keyed progress on the repr of the oplog
        # collection; newer versions key on the replica set name. Drop
        # any legacy key so upgrades converge on the new format (see the
        # comment in read_last_checkpoint for the format history).
        oplog_dict.pop(str(self.oplog), None)
        oplog_dict[self.replset_name] = checkpoint
        LOG.debug("OplogThread: oplog checkpoint updated to %s", checkpoint)
python
def update_checkpoint(self, checkpoint): if checkpoint is not None and checkpoint != self.checkpoint: self.checkpoint = checkpoint with self.oplog_progress as oplog_prog: oplog_dict = oplog_prog.get_dict() # If we have the repr of our oplog collection # in the dictionary, remove it and replace it # with our replica set name. # This allows an easy upgrade path from mongo-connector 2.3. # For an explanation of the format change, see the comment in # read_last_checkpoint. oplog_dict.pop(str(self.oplog), None) oplog_dict[self.replset_name] = checkpoint LOG.debug("OplogThread: oplog checkpoint updated to %s", checkpoint) else: LOG.debug("OplogThread: no checkpoint to update.")
[ "def", "update_checkpoint", "(", "self", ",", "checkpoint", ")", ":", "if", "checkpoint", "is", "not", "None", "and", "checkpoint", "!=", "self", ".", "checkpoint", ":", "self", ".", "checkpoint", "=", "checkpoint", "with", "self", ".", "oplog_progress", "as...
Store the current checkpoint in the oplog progress dictionary.
[ "Store", "the", "current", "checkpoint", "in", "the", "oplog", "progress", "dictionary", "." ]
557cafd4b54c848cd54ef28a258391a154650cb4
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L839-L856
226,704
yougov/mongo-connector
mongo_connector/oplog_manager.py
OplogThread.read_last_checkpoint
def read_last_checkpoint(self):
    """Load the last saved checkpoint from the oplog progress dictionary.

    Also caches the value on ``self.checkpoint``. Returns None when no
    checkpoint has been stored under either key format.
    """
    # mongo-connector <= 2.3 keyed progress on the repr of the oplog
    # collection; later versions use the replica set name. Check the new
    # format first, then fall back to the legacy key.
    legacy_key = str(self.oplog)
    with self.oplog_progress as oplog_prog:
        progress = oplog_prog.get_dict()
        checkpoint = progress.get(self.replset_name, progress.get(legacy_key))
    LOG.debug("OplogThread: reading last checkpoint as %s " % str(checkpoint))
    self.checkpoint = checkpoint
    return checkpoint
python
def read_last_checkpoint(self): # In versions of mongo-connector 2.3 and before, # we used the repr of the # oplog collection as keys in the oplog_progress dictionary. # In versions thereafter, we use the replica set name. For backwards # compatibility, we check for both. oplog_str = str(self.oplog) ret_val = None with self.oplog_progress as oplog_prog: oplog_dict = oplog_prog.get_dict() try: # New format. ret_val = oplog_dict[self.replset_name] except KeyError: try: # Old format. ret_val = oplog_dict[oplog_str] except KeyError: pass LOG.debug("OplogThread: reading last checkpoint as %s " % str(ret_val)) self.checkpoint = ret_val return ret_val
[ "def", "read_last_checkpoint", "(", "self", ")", ":", "# In versions of mongo-connector 2.3 and before,", "# we used the repr of the", "# oplog collection as keys in the oplog_progress dictionary.", "# In versions thereafter, we use the replica set name. For backwards", "# compatibility, we chec...
Read the last checkpoint from the oplog progress dictionary.
[ "Read", "the", "last", "checkpoint", "from", "the", "oplog", "progress", "dictionary", "." ]
557cafd4b54c848cd54ef28a258391a154650cb4
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L858-L883
226,705
dragnet-org/dragnet
dragnet/features/weninger.py
ClusteredWeningerFeatures.fit
def fit(self, blocks, y=None):
    """
    Fit a k-means clustering model using an ordered sequence of blocks.

    Args:
        blocks: ordered sequence of blocks to featurize and cluster.
        y: ignored; present for scikit-learn estimator API compatibility.

    Returns:
        self, to allow fit(...).transform(...) chaining.
    """
    self.kmeans.fit(make_weninger_features(blocks))
    # set the cluster center closest to the origin to exactly (0.0, 0.0)
    # NOTE(review): sort(axis=0) sorts each coordinate column
    # independently, so rows other than the first may no longer be the
    # original center pairs — presumably only row 0 matters here; confirm.
    self.kmeans.cluster_centers_.sort(axis=0)
    self.kmeans.cluster_centers_[0, :] = np.zeros(2)
    return self
python
def fit(self, blocks, y=None): self.kmeans.fit(make_weninger_features(blocks)) # set the cluster center closest to the origin to exactly (0.0, 0.0) self.kmeans.cluster_centers_.sort(axis=0) self.kmeans.cluster_centers_[0, :] = np.zeros(2) return self
[ "def", "fit", "(", "self", ",", "blocks", ",", "y", "=", "None", ")", ":", "self", ".", "kmeans", ".", "fit", "(", "make_weninger_features", "(", "blocks", ")", ")", "# set the cluster center closest to the origin to exactly (0.0, 0.0)", "self", ".", "kmeans", "...
Fit a k-means clustering model using an ordered sequence of blocks.
[ "Fit", "a", "k", "-", "means", "clustering", "model", "using", "an", "ordered", "sequence", "of", "blocks", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/features/weninger.py#L82-L90
226,706
dragnet-org/dragnet
dragnet/model_training.py
evaluate_model_predictions
def evaluate_model_predictions(y_true, y_pred, weights=None):
    """
    Evaluate the performance of an extractor model's binary classification
    predictions, typically at the block level, of whether a block is
    content or not.

    Args:
        y_true (``np.ndarray``)
        y_pred (``np.ndarray``)
        weights (``np.ndarray``)

    Returns:
        Dict[str, float]
    """
    # Inputs may arrive as per-document arrays; flatten each into a
    # single array before scoring.
    if isinstance(y_pred[0], np.ndarray):
        y_pred = np.concatenate(y_pred)
    if isinstance(y_true[0], np.ndarray):
        y_true = np.concatenate(y_true)
    if weights is not None and isinstance(weights[0], np.ndarray):
        weights = np.concatenate(weights)
    return {
        'accuracy': accuracy_score(
            y_true, y_pred, normalize=True, sample_weight=weights),
        'precision': precision_score(
            y_true, y_pred, average='binary', pos_label=1, sample_weight=weights),
        'recall': recall_score(
            y_true, y_pred, average='binary', pos_label=1, sample_weight=weights),
        'f1': f1_score(
            y_true, y_pred, average='binary', pos_label=1, sample_weight=weights),
    }
python
def evaluate_model_predictions(y_true, y_pred, weights=None): if isinstance(y_pred[0], np.ndarray): y_pred = np.concatenate(y_pred) if isinstance(y_true[0], np.ndarray): y_true = np.concatenate(y_true) if (weights is not None) and (isinstance(weights[0], np.ndarray)): weights = np.concatenate(weights) accuracy = accuracy_score( y_true, y_pred, normalize=True, sample_weight=weights) precision = precision_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) recall = recall_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) f1 = f1_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}
[ "def", "evaluate_model_predictions", "(", "y_true", ",", "y_pred", ",", "weights", "=", "None", ")", ":", "if", "isinstance", "(", "y_pred", "[", "0", "]", ",", "np", ".", "ndarray", ")", ":", "y_pred", "=", "np", ".", "concatenate", "(", "y_pred", ")"...
Evaluate the performance of an extractor model's binary classification predictions, typically at the block level, of whether a block is content or not. Args: y_true (``np.ndarray``) y_pred (``np.ndarray``) weights (``np.ndarray``) Returns: Dict[str, float]
[ "Evaluate", "the", "performance", "of", "an", "extractor", "model", "s", "binary", "classification", "predictions", "typically", "at", "the", "block", "level", "of", "whether", "a", "block", "is", "content", "or", "not", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/model_training.py#L19-L48
226,707
dragnet-org/dragnet
dragnet/model_training.py
evaluate_extracted_tokens
def evaluate_extracted_tokens(gold_content, extr_content):
    """
    Evaluate the similarity between gold-standard and extracted content,
    typically for a single HTML document, as another way of evaluating
    the performance of an extractor model.

    Args:
        gold_content (str or Sequence[str]): Gold-standard content,
            either as a string or as an already-tokenized list of tokens.
        extr_content (str or Sequence[str]): Extracted content, either
            as a string or as an already-tokenized list of tokens.

    Returns:
        Dict[str, float]
    """
    if isinstance(gold_content, string_):
        gold_content = simple_tokenizer(gold_content)
    if isinstance(extr_content, string_):
        extr_content = simple_tokenizer(extr_content)
    gold_set = set(gold_content)
    extr_set = set(extr_content)
    union_set = gold_set | extr_set
    # Guard against ZeroDivisionError when both token sequences are empty;
    # two empty extractions are defined as identical (jaccard = 1.0).
    jaccard = len(gold_set & extr_set) / len(union_set) if union_set else 1.0
    levenshtein = dameraulevenshtein(gold_content, extr_content)
    return {'jaccard': jaccard, 'levenshtein': levenshtein}
python
def evaluate_extracted_tokens(gold_content, extr_content): if isinstance(gold_content, string_): gold_content = simple_tokenizer(gold_content) if isinstance(extr_content, string_): extr_content = simple_tokenizer(extr_content) gold_set = set(gold_content) extr_set = set(extr_content) jaccard = len(gold_set & extr_set) / len(gold_set | extr_set) levenshtein = dameraulevenshtein(gold_content, extr_content) return {'jaccard': jaccard, 'levenshtein': levenshtein}
[ "def", "evaluate_extracted_tokens", "(", "gold_content", ",", "extr_content", ")", ":", "if", "isinstance", "(", "gold_content", ",", "string_", ")", ":", "gold_content", "=", "simple_tokenizer", "(", "gold_content", ")", "if", "isinstance", "(", "extr_content", "...
Evaluate the similarity between gold-standard and extracted content, typically for a single HTML document, as another way of evaluating the performance of an extractor model. Args: gold_content (str or Sequence[str]): Gold-standard content, either as a string or as an already-tokenized list of tokens. extr_content (str or Sequence[str]): Extracted content, either as a string or as an already-tokenized list of tokens. Returns: Dict[str, float]
[ "Evaluate", "the", "similarity", "between", "gold", "-", "standard", "and", "extracted", "content", "typically", "for", "a", "single", "HTML", "document", "as", "another", "way", "of", "evaluating", "the", "performance", "of", "an", "extractor", "model", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/model_training.py#L51-L74
226,708
dragnet-org/dragnet
dragnet/data_processing.py
extract_gold_standard_blocks
def extract_gold_standard_blocks(data_dir, fileroot, encoding=None,
                                 tokenizer=simple_tokenizer, cetr=False):
    """
    Extract the gold standard block-level content and comments for a single
    observation identified by ``fileroot``, and write the results to file.

    Args:
        data_dir (str): The root directory containing sub-directories for
            raw HTML, gold standard extracted content, and gold standard blocks.
        fileroot (str): Unique identifier for a single observation of
            training data, corresponding to the start of its raw html and
            gold standard filenames under ``data_dir``.
        encoding (str)
        tokenizer (Callable): Object that takes a string and returns the
            tokens as a list of strings.
        cetr (bool): If True, parse the gold standard in clean eval format.

    Notes:
        Results are written to a text file in the block-level gold standard
        dir :obj:`GOLD_STANDARD_BLOCKS_DIRNAME` below ``data_dir``. Each line
        corresponds to a single block in its order of appearance, and has the
        following format::

            content_frac comments_frac all_tokens content_tokens comments_tokens

        where each item is separated by a tab. ``content_frac`` is equal to
        the fraction of ``all_tokens`` found in the corresponding gold parse
        content text; ``comments_frac`` is the same but for comments text.
    """
    # read the raw html, split it into blocks, and tokenize each block
    raw_html = read_html_file(data_dir, fileroot, encoding=encoding)  # text is unicode
    from dragnet.blocks import BlockifyError
    try:
        blocks = [b.text for b in Blockifier.blockify(raw_html)]  # text is bytes
    except BlockifyError:
        print('BlockifyError for file "{}"'.format(fileroot))
        return
    blocks_tokens = [tokenizer(block) for block in blocks]
    num_blocks_tokens = [len(block_tokens) for block_tokens in blocks_tokens]

    # solve the longest common subsequence problem to determine which blocks
    # were kept: we need a list of all the tokens in the blocks, plus a
    # correspondence of which block they belong to. We will determine which
    # of the tokens is in the extracted content, then use the correspondence
    # to block id to determine which blocks were kept.
    all_blocks_tokens = []
    all_blocks_tokens_block_id = []
    for i, block_tokens in enumerate(blocks_tokens):
        all_blocks_tokens.extend(block_tokens)
        all_blocks_tokens_block_id.extend([i] * len(block_tokens))

    def get_frac_and_str_tokens_in_gs(gs_txt):
        """
        For each block, determine which and what fraction of tokens are
        also in the gold standard text ``gs_txt`` for either content or
        comments.

        Returns:
            List[float]
            List[str]
        """
        gs_tokens = tokenizer(gs_txt)
        tokens_in_gs = check_inclusion(all_blocks_tokens, gs_tokens)
        num_blocks_tokens_in_gs = [0 for _ in range(len(blocks))]
        blocks_tokens_in_gs_tokens = [[] for _ in range(len(blocks))]
        for token, token_in_gs, block_id in zip(
                all_blocks_tokens, tokens_in_gs, all_blocks_tokens_block_id):
            if token_in_gs is True:
                num_blocks_tokens_in_gs[block_id] += 1
                blocks_tokens_in_gs_tokens[block_id].append(token)
        blocks_tokens_strs_in_gs = [
            ' '.join(block_tokens_in_gs_tokens)
            for block_tokens_in_gs_tokens in blocks_tokens_in_gs_tokens]
        # guard against zero-token blocks, which would otherwise raise
        # ZeroDivisionError; an empty block contributes a 0.0 fraction
        frac_blocks_tokens_in_gs = [
            (num_in_gs / num_total) if num_total else 0.0
            for num_in_gs, num_total
            in zip(num_blocks_tokens_in_gs, num_blocks_tokens)]
        return (frac_blocks_tokens_in_gs, blocks_tokens_strs_in_gs)

    # BUG FIX: ``cetr`` was previously passed as the third *positional*
    # argument, which bound it to ``read_gold_standard_file``'s ``encoding``
    # parameter — so cetr=True was silently ignored and corrupted the
    # encoding lookup. Pass it by keyword.
    gs_content, gs_comments = read_gold_standard_file(
        data_dir, fileroot, cetr=cetr)
    frac_blocks_tokens_in_gs_content, blocks_tokens_strs_in_gs_content = \
        get_frac_and_str_tokens_in_gs(gs_content)
    frac_blocks_tokens_in_gs_comments, blocks_tokens_strs_in_gs_comments = \
        get_frac_and_str_tokens_in_gs(gs_comments)

    output_fname = os.path.join(
        data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)
    line_fmt = u'{frac_content}\t{frac_comments}\t{block_tokens}\t{content_tokens}\t{comment_tokens}\n'
    with io.open(output_fname, mode='w') as f:
        for block_id, block_tokens in enumerate(blocks_tokens):
            line = line_fmt.format(
                frac_content=frac_blocks_tokens_in_gs_content[block_id],
                frac_comments=frac_blocks_tokens_in_gs_comments[block_id],
                block_tokens=' '.join(block_tokens),
                content_tokens=blocks_tokens_strs_in_gs_content[block_id],
                comment_tokens=blocks_tokens_strs_in_gs_comments[block_id])
            f.write(line)
python
def extract_gold_standard_blocks(data_dir, fileroot, encoding=None, tokenizer=simple_tokenizer, cetr=False): # read the raw html, split it into blocks, and tokenize each block raw_html = read_html_file(data_dir, fileroot, encoding=encoding) # text is unicode from dragnet.blocks import BlockifyError try: blocks = [b.text for b in Blockifier.blockify(raw_html)] # text is bytes except BlockifyError as e: print('BlockifyError for file "{}"'.format(fileroot)) return blocks_tokens = [tokenizer(block) for block in blocks] num_blocks_tokens = [len(block_tokens) for block_tokens in blocks_tokens] # solve the longest common subsequence problem to determine which blocks were kept # need a list of all the tokens in the blocks, plus a correspondence of which # block they belong to. # we will determine which of the tokens is in the extracted content, # then use the correspondence to block id to determine which blocks were kept # get a flattened sequence of all tokens in all blocks # and their corresponding block ids all_blocks_tokens = [] all_blocks_tokens_block_id = [] for i, block_tokens in enumerate(blocks_tokens): all_blocks_tokens.extend(block_tokens) all_blocks_tokens_block_id.extend([i] * len(block_tokens)) # TODO: do we really need `num_all_blocks_tokens`? # it was used to determine if there were more gold standard tokens than *all* # tokens, and if so, some info was written to disk # but it seems like an odd check, and it's probably better to take the # gold standard data at face value -- presumably, somebody checked it! # num_all_blocks_tokens = len(all_blocks_tokens) def get_frac_and_str_tokens_in_gs(gs_txt): """ For each block, determine which and what fraction of tokens are also in the gold standard text ``gs_txt`` for either content or comments. 
Returns: List[float] List[str] """ gs_tokens = tokenizer(gs_txt) tokens_in_gs = check_inclusion(all_blocks_tokens, gs_tokens) num_blocks_tokens_in_gs = [0 for _ in range(len(blocks))] blocks_tokens_in_gs_tokens = [[] for _ in range(len(blocks))] for token, token_in_gs, block_id in zip(all_blocks_tokens, tokens_in_gs, all_blocks_tokens_block_id): if token_in_gs is True: num_blocks_tokens_in_gs[block_id] += 1 blocks_tokens_in_gs_tokens[block_id].append(token) blocks_tokens_strs_in_gs = [ ' '.join(block_tokens_in_gs_tokens) for block_tokens_in_gs_tokens in blocks_tokens_in_gs_tokens] frac_blocks_tokens_in_gs = [ num_block_tokens_in_gs / num_block_tokens for num_block_tokens_in_gs, num_block_tokens in zip(num_blocks_tokens_in_gs, num_blocks_tokens)] return (frac_blocks_tokens_in_gs, blocks_tokens_strs_in_gs) gs_content, gs_comments = read_gold_standard_file(data_dir, fileroot, cetr) frac_blocks_tokens_in_gs_content, blocks_tokens_strs_in_gs_content = \ get_frac_and_str_tokens_in_gs(gs_content) frac_blocks_tokens_in_gs_comments, blocks_tokens_strs_in_gs_comments = \ get_frac_and_str_tokens_in_gs(gs_comments) output_fname = os.path.join( data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT) line_fmt = u'{frac_content}\t{frac_comments}\t{block_tokens}\t{content_tokens}\t{comment_tokens}\n' with io.open(output_fname, mode='w') as f: for block_id, block_tokens in enumerate(blocks_tokens): line = line_fmt.format( frac_content=frac_blocks_tokens_in_gs_content[block_id], frac_comments=frac_blocks_tokens_in_gs_comments[block_id], block_tokens=' '.join(block_tokens), content_tokens=blocks_tokens_strs_in_gs_content[block_id], comment_tokens=blocks_tokens_strs_in_gs_comments[block_id]) f.write(line)
[ "def", "extract_gold_standard_blocks", "(", "data_dir", ",", "fileroot", ",", "encoding", "=", "None", ",", "tokenizer", "=", "simple_tokenizer", ",", "cetr", "=", "False", ")", ":", "# read the raw html, split it into blocks, and tokenize each block", "raw_html", "=", ...
Extract the gold standard block-level content and comments for a single observation identified by ``fileroot``, and write the results to file. Args: data_dir (str): The root directory containing sub-directories for raw HTML, gold standard extracted content, and gold standard blocks. fileroot (str): Unique identifier for a single observation of training data, corresponding to the start of its raw html and gold standard filenames under ``data_dir``. encoding (str) tokenizer (Callable): Object that takes a string and returns the tokens as a list of strings. cetr (bool): If True, parse the gold standard in clean eval format. Notes: Results are written to a text file in the block-level gold standard dir :obj:`GOLD_STANDARD_BLOCKS_DIRNAME` below ``data_dir``. Each line corresponds to a single block in its order of appearance, and has the following format:: content_frac comments_frac all_tokens content_tokens comments_tokens where each item is separated by a tab. ``content_frac`` is equal to the fraction of ``all_tokens`` found in the corresponding gold parse content text; ``comments_frac`` is the same but for comments text.
[ "Extract", "the", "gold", "standard", "block", "-", "level", "content", "and", "comments", "for", "a", "single", "observation", "identified", "by", "fileroot", "and", "write", "the", "results", "to", "file", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/data_processing.py#L89-L196
226,709
dragnet-org/dragnet
dragnet/data_processing.py
get_filenames
def get_filenames(dirname, full_path=False, match_regex=None, extension=None):
    """
    Yield the filenames under ``dirname``, in sorted order, keeping only
    those that match ``match_regex`` and/or carry the file extension
    ``extension``.

    Args:
        dirname (str): /path/to/dir on disk where files to read are saved
        full_path (bool): if False, yield bare filenames; if True, yield
            ``os.path.join(dirname, fname)``
        match_regex (str): include files whose names match this regex pattern
        extension (str): if files only of a certain type are wanted,
            specify the file extension (e.g. ".txt")

    Yields:
        str: next matching filename

    Raises:
        OSError: if ``dirname`` does not exist
    """
    if not os.path.exists(dirname):
        raise OSError('directory "{}" does not exist'.format(dirname))
    pattern = re.compile(match_regex) if match_regex else None
    for fname in sorted(os.listdir(dirname)):
        # skip files failing either filter; a missing filter passes everything
        if extension and os.path.splitext(fname)[-1] != extension:
            continue
        if pattern is not None and not pattern.search(fname):
            continue
        yield os.path.join(dirname, fname) if full_path is True else fname
python
def get_filenames(dirname, full_path=False, match_regex=None, extension=None): if not os.path.exists(dirname): raise OSError('directory "{}" does not exist'.format(dirname)) match_regex = re.compile(match_regex) if match_regex else None for filename in sorted(os.listdir(dirname)): if extension and not os.path.splitext(filename)[-1] == extension: continue if match_regex and not match_regex.search(filename): continue if full_path is True: yield os.path.join(dirname, filename) else: yield filename
[ "def", "get_filenames", "(", "dirname", ",", "full_path", "=", "False", ",", "match_regex", "=", "None", ",", "extension", "=", "None", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "raise", "OSError", "(", "'direct...
Get all filenames under ``dirname`` that match ``match_regex`` or have file extension equal to ``extension``, optionally prepending the full path. Args: dirname (str): /path/to/dir on disk where files to read are saved full_path (bool): if False, return filenames without path; if True, return filenames with path, as ``os.path.join(dirname, fname)`` match_regex (str): include files whose names match this regex pattern extension (str): if files only of a certain type are wanted, specify the file extension (e.g. ".txt") Yields: str: next matching filename
[ "Get", "all", "filenames", "under", "dirname", "that", "match", "match_regex", "or", "have", "file", "extension", "equal", "to", "extension", "optionally", "prepending", "the", "full", "path", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/data_processing.py#L199-L226
226,710
dragnet-org/dragnet
dragnet/data_processing.py
read_html_file
def read_html_file(data_dir, fileroot, encoding=None):
    """
    Read the HTML file corresponding to identifier ``fileroot`` in the
    raw HTML directory below the root ``data_dir``.

    Args:
        data_dir (str)
        fileroot (str)
        encoding (str): if given, only this encoding is tried; otherwise
            fall back through a default sequence of encodings

    Returns:
        str

    Raises:
        UnicodeError: if the file cannot be decoded with any candidate
            encoding
    """
    fname = os.path.join(
        data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT)
    encodings = (encoding,) if encoding else ('utf-8', 'iso-8859-1')  # 'utf-16'
    raw_html = None
    for encoding in encodings:
        try:
            with io.open(fname, mode='rt', encoding=encoding) as f:
                raw_html = f.read()
            break
        except (UnicodeDecodeError, UnicodeError):
            raw_html = None
    if raw_html is None:
        # BUG FIX: previously this fell through to ftfy.fix_encoding(None),
        # which raised an unhelpful error; fail with a clear message instead
        raise UnicodeError(
            'could not decode file "{}" with encodings {}'.format(
                fname, encodings))
    return ftfy.fix_encoding(raw_html).strip()
python
def read_html_file(data_dir, fileroot, encoding=None): fname = os.path.join( data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT) encodings = (encoding,) if encoding else ('utf-8', 'iso-8859-1') # 'utf-16' for encoding in encodings: try: with io.open(fname, mode='rt', encoding=encoding) as f: raw_html = f.read() break except (UnicodeDecodeError, UnicodeError): raw_html = None return ftfy.fix_encoding(raw_html).strip()
[ "def", "read_html_file", "(", "data_dir", ",", "fileroot", ",", "encoding", "=", "None", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "RAW_HTML_DIRNAME", ",", "fileroot", "+", "RAW_HTML_EXT", ")", "encodings", "=", "(", "...
Read the HTML file corresponding to identifier ``fileroot`` in the raw HTML directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) encoding (str) Returns: str
[ "Read", "the", "HTML", "file", "corresponding", "to", "identifier", "fileroot", "in", "the", "raw", "HTML", "directory", "below", "the", "root", "data_dir", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/data_processing.py#L229-L253
226,711
dragnet-org/dragnet
dragnet/data_processing.py
read_gold_standard_file
def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False): """ Read the gold standard content file corresponding to identifier ``fileroot`` in the gold standard directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) encoding (str) cetr (bool): if True, assume no comments and parse the gold standard to remove tags Returns: List[str, str]: contents string and comments string, respectively """ fname = os.path.join( data_dir, GOLD_STANDARD_DIRNAME, fileroot + GOLD_STANDARD_EXT) encodings = (encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1') for encoding in encodings: try: with io.open(fname, mode='rt', encoding=encoding) as f: gold_standard = f.read() break except (UnicodeDecodeError, UnicodeError): gold_standard = None if not gold_standard: return [u'', u''] if not cetr: content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1) # if no comments delimiter found, append empty comments string if len(content_comments) == 1: content_comments = [content_comments[0], u''] else: tree = etree.fromstring(gold_standard, parser=etree.HTMLParser()) content_comments = [u' '.join(text_from_subtree(tree)), u''] # fix text in case of mangled encodings content_comments = [ftfy.fix_encoding(content_comments[0]).strip(), ftfy.fix_encoding(content_comments[1]).strip()] return content_comments
python
def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False): fname = os.path.join( data_dir, GOLD_STANDARD_DIRNAME, fileroot + GOLD_STANDARD_EXT) encodings = (encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1') for encoding in encodings: try: with io.open(fname, mode='rt', encoding=encoding) as f: gold_standard = f.read() break except (UnicodeDecodeError, UnicodeError): gold_standard = None if not gold_standard: return [u'', u''] if not cetr: content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1) # if no comments delimiter found, append empty comments string if len(content_comments) == 1: content_comments = [content_comments[0], u''] else: tree = etree.fromstring(gold_standard, parser=etree.HTMLParser()) content_comments = [u' '.join(text_from_subtree(tree)), u''] # fix text in case of mangled encodings content_comments = [ftfy.fix_encoding(content_comments[0]).strip(), ftfy.fix_encoding(content_comments[1]).strip()] return content_comments
[ "def", "read_gold_standard_file", "(", "data_dir", ",", "fileroot", ",", "encoding", "=", "None", ",", "cetr", "=", "False", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "GOLD_STANDARD_DIRNAME", ",", "fileroot", "+", "GOLD_...
Read the gold standard content file corresponding to identifier ``fileroot`` in the gold standard directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) encoding (str) cetr (bool): if True, assume no comments and parse the gold standard to remove tags Returns: List[str, str]: contents string and comments string, respectively
[ "Read", "the", "gold", "standard", "content", "file", "corresponding", "to", "identifier", "fileroot", "in", "the", "gold", "standard", "directory", "below", "the", "root", "data_dir", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/data_processing.py#L256-L298
226,712
dragnet-org/dragnet
dragnet/data_processing.py
read_gold_standard_blocks_file
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True): """ Read the gold standard blocks file corresponding to identifier ``fileroot`` in the gold standard blocks directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) split_blocks (bool): If True, split the file's content into blocks. Returns: str or List[str] """ fname = os.path.join( data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT) with io.open(fname, mode='r') as f: data = f.read() if split_blocks: return filter(None, data[:-1].split('\n')) return filter(None, data)
python
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True): fname = os.path.join( data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT) with io.open(fname, mode='r') as f: data = f.read() if split_blocks: return filter(None, data[:-1].split('\n')) return filter(None, data)
[ "def", "read_gold_standard_blocks_file", "(", "data_dir", ",", "fileroot", ",", "split_blocks", "=", "True", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "GOLD_STANDARD_BLOCKS_DIRNAME", ",", "fileroot", "+", "GOLD_STANDARD_BLOCKS_E...
Read the gold standard blocks file corresponding to identifier ``fileroot`` in the gold standard blocks directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) split_blocks (bool): If True, split the file's content into blocks. Returns: str or List[str]
[ "Read", "the", "gold", "standard", "blocks", "file", "corresponding", "to", "identifier", "fileroot", "in", "the", "gold", "standard", "blocks", "directory", "below", "the", "root", "data_dir", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/data_processing.py#L301-L320
226,713
dragnet-org/dragnet
dragnet/data_processing.py
prepare_data
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1): """ Prepare data for a single HTML + gold standard blocks example, uniquely identified by ``fileroot``. Args: data_dir (str) fileroot (str) block_pct_tokens_thresh (float): must be in [0.0, 1.0] Returns: Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]: The first element is simply the raw html as a string. The second and third elements are 3-tuples for content and comments, respectively, where the first element is a numpy array of 1s and 0s whose values correspond to whether or not a given block is considered non-content or not; the second element is a numpy integer array whose values are the total number of tokens in each block; and the third element is a flat list of content or comment tokens as strings, concatenated from all blocks. See Also: :func:`prepare_all_data` """ if not 0.0 <= block_pct_tokens_thresh <= 1.0: raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]') html = read_html_file(data_dir, fileroot) blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True) content_blocks = [] comments_blocks = [] for block in blocks: block_split = block.split('\t') num_block_tokens = len(block_split[2].split()) # total number of tokens in block is used as weights content_blocks.append( (float(block_split[0]), num_block_tokens, block_split[3].split())) comments_blocks.append( (float(block_split[1]), num_block_tokens, block_split[4].split())) parsed_content_blocks = _parse_content_or_comments_blocks( content_blocks, block_pct_tokens_thresh) parsed_comments_blocks = _parse_content_or_comments_blocks( comments_blocks, block_pct_tokens_thresh) return (html, parsed_content_blocks, parsed_comments_blocks)
python
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1): if not 0.0 <= block_pct_tokens_thresh <= 1.0: raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]') html = read_html_file(data_dir, fileroot) blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True) content_blocks = [] comments_blocks = [] for block in blocks: block_split = block.split('\t') num_block_tokens = len(block_split[2].split()) # total number of tokens in block is used as weights content_blocks.append( (float(block_split[0]), num_block_tokens, block_split[3].split())) comments_blocks.append( (float(block_split[1]), num_block_tokens, block_split[4].split())) parsed_content_blocks = _parse_content_or_comments_blocks( content_blocks, block_pct_tokens_thresh) parsed_comments_blocks = _parse_content_or_comments_blocks( comments_blocks, block_pct_tokens_thresh) return (html, parsed_content_blocks, parsed_comments_blocks)
[ "def", "prepare_data", "(", "data_dir", ",", "fileroot", ",", "block_pct_tokens_thresh", "=", "0.1", ")", ":", "if", "not", "0.0", "<=", "block_pct_tokens_thresh", "<=", "1.0", ":", "raise", "ValueError", "(", "'block_pct_tokens_thresh must be in the range [0.0, 1.0]'",...
Prepare data for a single HTML + gold standard blocks example, uniquely identified by ``fileroot``. Args: data_dir (str) fileroot (str) block_pct_tokens_thresh (float): must be in [0.0, 1.0] Returns: Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]: The first element is simply the raw html as a string. The second and third elements are 3-tuples for content and comments, respectively, where the first element is a numpy array of 1s and 0s whose values correspond to whether or not a given block is considered non-content or not; the second element is a numpy integer array whose values are the total number of tokens in each block; and the third element is a flat list of content or comment tokens as strings, concatenated from all blocks. See Also: :func:`prepare_all_data`
[ "Prepare", "data", "for", "a", "single", "HTML", "+", "gold", "standard", "blocks", "example", "uniquely", "identified", "by", "fileroot", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/data_processing.py#L331-L377
226,714
dragnet-org/dragnet
dragnet/data_processing.py
prepare_all_data
def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1): """ Prepare data for all HTML + gold standard blocks examples in ``data_dir``. Args: data_dir (str) block_pct_tokens_thresh (float): must be in [0.0, 1.0] Returns: List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]] See Also: :func:`prepare_data` """ gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME) gs_blocks_filenames = get_filenames( gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT)) gs_blocks_fileroots = ( re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1) for gs_blocks_filename in gs_blocks_filenames) return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh) for fileroot in gs_blocks_fileroots]
python
def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1): gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME) gs_blocks_filenames = get_filenames( gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT)) gs_blocks_fileroots = ( re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1) for gs_blocks_filename in gs_blocks_filenames) return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh) for fileroot in gs_blocks_fileroots]
[ "def", "prepare_all_data", "(", "data_dir", ",", "block_pct_tokens_thresh", "=", "0.1", ")", ":", "gs_blocks_dir", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "GOLD_STANDARD_BLOCKS_DIRNAME", ")", "gs_blocks_filenames", "=", "get_filenames", "(", "gs...
Prepare data for all HTML + gold standard blocks examples in ``data_dir``. Args: data_dir (str) block_pct_tokens_thresh (float): must be in [0.0, 1.0] Returns: List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]] See Also: :func:`prepare_data`
[ "Prepare", "data", "for", "all", "HTML", "+", "gold", "standard", "blocks", "examples", "in", "data_dir", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/data_processing.py#L380-L402
226,715
dragnet-org/dragnet
dragnet/compat.py
str_cast
def str_cast(maybe_bytes, encoding='utf-8'): """ Converts any bytes-like input to a string-like output, with respect to python version Parameters ---------- maybe_bytes : if this is a bytes-like object, it will be converted to a string encoding : str, default='utf-8' encoding to be used when decoding bytes """ if isinstance(maybe_bytes, bytes_): return maybe_bytes.decode(encoding) else: return maybe_bytes
python
def str_cast(maybe_bytes, encoding='utf-8'): if isinstance(maybe_bytes, bytes_): return maybe_bytes.decode(encoding) else: return maybe_bytes
[ "def", "str_cast", "(", "maybe_bytes", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "maybe_bytes", ",", "bytes_", ")", ":", "return", "maybe_bytes", ".", "decode", "(", "encoding", ")", "else", ":", "return", "maybe_bytes" ]
Converts any bytes-like input to a string-like output, with respect to python version Parameters ---------- maybe_bytes : if this is a bytes-like object, it will be converted to a string encoding : str, default='utf-8' encoding to be used when decoding bytes
[ "Converts", "any", "bytes", "-", "like", "input", "to", "a", "string", "-", "like", "output", "with", "respect", "to", "python", "version" ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/compat.py#L24-L38
226,716
dragnet-org/dragnet
dragnet/compat.py
bytes_cast
def bytes_cast(maybe_str, encoding='utf-8'): """ Converts any string-like input to a bytes-like output, with respect to python version Parameters ---------- maybe_str : if this is a string-like object, it will be converted to bytes encoding : str, default='utf-8' encoding to be used when encoding string """ if isinstance(maybe_str, unicode_): return maybe_str.encode(encoding) else: return maybe_str
python
def bytes_cast(maybe_str, encoding='utf-8'): if isinstance(maybe_str, unicode_): return maybe_str.encode(encoding) else: return maybe_str
[ "def", "bytes_cast", "(", "maybe_str", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "maybe_str", ",", "unicode_", ")", ":", "return", "maybe_str", ".", "encode", "(", "encoding", ")", "else", ":", "return", "maybe_str" ]
Converts any string-like input to a bytes-like output, with respect to python version Parameters ---------- maybe_str : if this is a string-like object, it will be converted to bytes encoding : str, default='utf-8' encoding to be used when encoding string
[ "Converts", "any", "string", "-", "like", "input", "to", "a", "bytes", "-", "like", "output", "with", "respect", "to", "python", "version" ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/compat.py#L41-L55
226,717
dragnet-org/dragnet
dragnet/compat.py
str_dict_cast
def str_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs): """ Converts any bytes-like items in input dict to string-like values, with respect to python version Parameters ---------- dict_ : dict any bytes-like objects contained in the dict will be converted to a string include_keys : bool, default=True if True, cast keys to a string, else ignore include_values : bool, default=True if True, cast values to a string, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when decoding bytes """ new_keys = str_list_cast(dict_.keys(), **kwargs) if include_keys else dict_.keys() new_vals = str_list_cast(dict_.values(), **kwargs) if include_vals else dict_.values() new_dict = dict(zip_(new_keys, new_vals)) return new_dict
python
def str_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs): new_keys = str_list_cast(dict_.keys(), **kwargs) if include_keys else dict_.keys() new_vals = str_list_cast(dict_.values(), **kwargs) if include_vals else dict_.values() new_dict = dict(zip_(new_keys, new_vals)) return new_dict
[ "def", "str_dict_cast", "(", "dict_", ",", "include_keys", "=", "True", ",", "include_vals", "=", "True", ",", "*", "*", "kwargs", ")", ":", "new_keys", "=", "str_list_cast", "(", "dict_", ".", "keys", "(", ")", ",", "*", "*", "kwargs", ")", "if", "i...
Converts any bytes-like items in input dict to string-like values, with respect to python version Parameters ---------- dict_ : dict any bytes-like objects contained in the dict will be converted to a string include_keys : bool, default=True if True, cast keys to a string, else ignore include_values : bool, default=True if True, cast values to a string, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when decoding bytes
[ "Converts", "any", "bytes", "-", "like", "items", "in", "input", "dict", "to", "string", "-", "like", "values", "with", "respect", "to", "python", "version" ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/compat.py#L91-L112
226,718
dragnet-org/dragnet
dragnet/compat.py
bytes_dict_cast
def bytes_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs): """ Converts any string-like items in input dict to bytes-like values, with respect to python version Parameters ---------- dict_ : dict any string-like objects contained in the dict will be converted to bytes include_keys : bool, default=True if True, cast keys to bytes, else ignore include_values : bool, default=True if True, cast values to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string """ new_keys = bytes_list_cast(dict_.keys(), **kwargs) if include_keys else dict_.keys() new_vals = bytes_list_cast(dict_.values(), **kwargs) if include_vals else dict_.values() new_dict = dict(zip_(new_keys, new_vals)) return new_dict
python
def bytes_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs): new_keys = bytes_list_cast(dict_.keys(), **kwargs) if include_keys else dict_.keys() new_vals = bytes_list_cast(dict_.values(), **kwargs) if include_vals else dict_.values() new_dict = dict(zip_(new_keys, new_vals)) return new_dict
[ "def", "bytes_dict_cast", "(", "dict_", ",", "include_keys", "=", "True", ",", "include_vals", "=", "True", ",", "*", "*", "kwargs", ")", ":", "new_keys", "=", "bytes_list_cast", "(", "dict_", ".", "keys", "(", ")", ",", "*", "*", "kwargs", ")", "if", ...
Converts any string-like items in input dict to bytes-like values, with respect to python version Parameters ---------- dict_ : dict any string-like objects contained in the dict will be converted to bytes include_keys : bool, default=True if True, cast keys to bytes, else ignore include_values : bool, default=True if True, cast values to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string
[ "Converts", "any", "string", "-", "like", "items", "in", "input", "dict", "to", "bytes", "-", "like", "values", "with", "respect", "to", "python", "version" ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/compat.py#L115-L135
226,719
dragnet-org/dragnet
dragnet/compat.py
str_block_cast
def str_block_cast(block, include_text=True, include_link_tokens=True, include_css=True, include_features=True, **kwargs): """ Converts any bytes-like items in input Block object to string-like values, with respect to python version Parameters ---------- block : blocks.Block any bytes-like objects contained in the block object will be converted to a string include_text : bool, default=True if True, cast text to a string, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to a string, else ignore include_css : bool, default=True if True, cast css to a string, else ignore include_features : bool, default=True if True, cast features to a string, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when decoding bytes """ if include_text: block.text = str_cast(block.text, **kwargs) if include_link_tokens: block.link_tokens = str_list_cast(block.link_tokens, **kwargs) if include_css: block.css = str_dict_cast(block.css, **kwargs) if include_features: block.features = str_dict_cast(block.features, **kwargs) return block
python
def str_block_cast(block, include_text=True, include_link_tokens=True, include_css=True, include_features=True, **kwargs): if include_text: block.text = str_cast(block.text, **kwargs) if include_link_tokens: block.link_tokens = str_list_cast(block.link_tokens, **kwargs) if include_css: block.css = str_dict_cast(block.css, **kwargs) if include_features: block.features = str_dict_cast(block.features, **kwargs) return block
[ "def", "str_block_cast", "(", "block", ",", "include_text", "=", "True", ",", "include_link_tokens", "=", "True", ",", "include_css", "=", "True", ",", "include_features", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "include_text", ":", "block", ...
Converts any bytes-like items in input Block object to string-like values, with respect to python version Parameters ---------- block : blocks.Block any bytes-like objects contained in the block object will be converted to a string include_text : bool, default=True if True, cast text to a string, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to a string, else ignore include_css : bool, default=True if True, cast css to a string, else ignore include_features : bool, default=True if True, cast features to a string, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when decoding bytes
[ "Converts", "any", "bytes", "-", "like", "items", "in", "input", "Block", "object", "to", "string", "-", "like", "values", "with", "respect", "to", "python", "version" ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/compat.py#L138-L173
226,720
dragnet-org/dragnet
dragnet/compat.py
bytes_block_cast
def bytes_block_cast(block, include_text=True, include_link_tokens=True, include_css=True, include_features=True, **kwargs): """ Converts any string-like items in input Block object to bytes-like values, with respect to python version Parameters ---------- block : blocks.Block any string-like objects contained in the block object will be converted to bytes include_text : bool, default=True if True, cast text to bytes, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to bytes, else ignore include_css : bool, default=True if True, cast css to bytes, else ignore include_features : bool, default=True if True, cast features to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string """ if include_text: block.text = bytes_cast(block.text, **kwargs) if include_link_tokens: block.link_tokens = bytes_list_cast(block.link_tokens, **kwargs) if include_css: block.css = bytes_dict_cast(block.css, **kwargs) if include_features: block.features = bytes_dict_cast(block.features, **kwargs) return block
python
def bytes_block_cast(block, include_text=True, include_link_tokens=True, include_css=True, include_features=True, **kwargs): if include_text: block.text = bytes_cast(block.text, **kwargs) if include_link_tokens: block.link_tokens = bytes_list_cast(block.link_tokens, **kwargs) if include_css: block.css = bytes_dict_cast(block.css, **kwargs) if include_features: block.features = bytes_dict_cast(block.features, **kwargs) return block
[ "def", "bytes_block_cast", "(", "block", ",", "include_text", "=", "True", ",", "include_link_tokens", "=", "True", ",", "include_css", "=", "True", ",", "include_features", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "include_text", ":", "block", ...
Converts any string-like items in input Block object to bytes-like values, with respect to python version Parameters ---------- block : blocks.Block any string-like objects contained in the block object will be converted to bytes include_text : bool, default=True if True, cast text to bytes, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to bytes, else ignore include_css : bool, default=True if True, cast css to bytes, else ignore include_features : bool, default=True if True, cast features to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string
[ "Converts", "any", "string", "-", "like", "items", "in", "input", "Block", "object", "to", "bytes", "-", "like", "values", "with", "respect", "to", "python", "version" ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/compat.py#L176-L211
226,721
dragnet-org/dragnet
dragnet/util.py
dameraulevenshtein
def dameraulevenshtein(seq1, seq2): """Calculate the Damerau-Levenshtein distance between sequences. This distance is the number of additions, deletions, substitutions, and transpositions needed to transform the first sequence into the second. Although generally used with strings, any sequences of comparable objects will work. Transpositions are exchanges of *consecutive* characters; all other operations are self-explanatory. This implementation is O(N*M) time and O(M) space, for N and M the lengths of the two sequences. >>> dameraulevenshtein('ba', 'abc') 2 >>> dameraulevenshtein('fee', 'deed') 2 It works with arbitrary sequences too: >>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e']) 2 """ # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix. # However, only the current and two previous rows are needed at once, # so we only store those. oneago = None thisrow = list(range_(1, len(seq2) + 1)) + [0] for x in range_(len(seq1)): # Python lists wrap around for negative indices, so put the # leftmost column at the *end* of the list. This matches with # the zero-indexed strings and saves extra calculation. twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1] for y in range_(len(seq2)): delcost = oneago[y] + 1 addcost = thisrow[y - 1] + 1 subcost = oneago[y - 1] + (seq1[x] != seq2[y]) thisrow[y] = min(delcost, addcost, subcost) # This block deals with transpositions if (x > 0 and y > 0 and seq1[x] == seq2[y - 1] and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]): thisrow[y] = min(thisrow[y], twoago[y - 2] + 1) return thisrow[len(seq2) - 1]
python
def dameraulevenshtein(seq1, seq2): # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix. # However, only the current and two previous rows are needed at once, # so we only store those. oneago = None thisrow = list(range_(1, len(seq2) + 1)) + [0] for x in range_(len(seq1)): # Python lists wrap around for negative indices, so put the # leftmost column at the *end* of the list. This matches with # the zero-indexed strings and saves extra calculation. twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1] for y in range_(len(seq2)): delcost = oneago[y] + 1 addcost = thisrow[y - 1] + 1 subcost = oneago[y - 1] + (seq1[x] != seq2[y]) thisrow[y] = min(delcost, addcost, subcost) # This block deals with transpositions if (x > 0 and y > 0 and seq1[x] == seq2[y - 1] and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]): thisrow[y] = min(thisrow[y], twoago[y - 2] + 1) return thisrow[len(seq2) - 1]
[ "def", "dameraulevenshtein", "(", "seq1", ",", "seq2", ")", ":", "# codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F", "# Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.", "# However, only the current and two previous rows are needed at once,", "# so we only store those....
Calculate the Damerau-Levenshtein distance between sequences. This distance is the number of additions, deletions, substitutions, and transpositions needed to transform the first sequence into the second. Although generally used with strings, any sequences of comparable objects will work. Transpositions are exchanges of *consecutive* characters; all other operations are self-explanatory. This implementation is O(N*M) time and O(M) space, for N and M the lengths of the two sequences. >>> dameraulevenshtein('ba', 'abc') 2 >>> dameraulevenshtein('fee', 'deed') 2 It works with arbitrary sequences too: >>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e']) 2
[ "Calculate", "the", "Damerau", "-", "Levenshtein", "distance", "between", "sequences", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/util.py#L20-L63
226,722
dragnet-org/dragnet
dragnet/util.py
load_pickled_model
def load_pickled_model(filename, dirname=None): """ Load a pickled ``Extractor`` model from disk. Args: filename (str): Name of pickled model file under ``dirname``. dirname (str): Name of directory on disk containing the pickled model. If None, dragnet's default pickled model directory is used: /path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION] Returns: :class:`dragnet.extractor.Extractor` """ if dirname is None: pkg_filename = pkgutil.get_loader('dragnet').get_filename('dragnet') pkg_dirname = os.path.dirname(pkg_filename) dirname = os.path.join(pkg_dirname, 'pickled_models', model_path) filepath = os.path.join(dirname, filename) return joblib.load(filepath)
python
def load_pickled_model(filename, dirname=None): if dirname is None: pkg_filename = pkgutil.get_loader('dragnet').get_filename('dragnet') pkg_dirname = os.path.dirname(pkg_filename) dirname = os.path.join(pkg_dirname, 'pickled_models', model_path) filepath = os.path.join(dirname, filename) return joblib.load(filepath)
[ "def", "load_pickled_model", "(", "filename", ",", "dirname", "=", "None", ")", ":", "if", "dirname", "is", "None", ":", "pkg_filename", "=", "pkgutil", ".", "get_loader", "(", "'dragnet'", ")", ".", "get_filename", "(", "'dragnet'", ")", "pkg_dirname", "=",...
Load a pickled ``Extractor`` model from disk. Args: filename (str): Name of pickled model file under ``dirname``. dirname (str): Name of directory on disk containing the pickled model. If None, dragnet's default pickled model directory is used: /path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION] Returns: :class:`dragnet.extractor.Extractor`
[ "Load", "a", "pickled", "Extractor", "model", "from", "disk", "." ]
532c9d9f28e5b1b57f3cabc708218d3863a16322
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/util.py#L150-L168
226,723
tgsmith61591/pmdarima
pmdarima/arima/utils.py
nsdiffs
def nsdiffs(x, m, max_D=2, test='ocsb', **kwargs): """Estimate the seasonal differencing term, ``D``. Perform a test of seasonality for different levels of ``D`` to estimate the number of seasonal differences required to make a given time series stationary. Will select the maximum value of ``D`` for which the time series is judged seasonally stationary by the statistical test. Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array to difference. m : int The number of seasonal periods (i.e., frequency of the time series) max_D : int, optional (default=2) Maximum number of seasonal differences allowed. Must be a positive integer. The estimated value of ``D`` will not exceed ``max_D``. test : str, optional (default='ocsb') Type of unit root test of seasonality to use in order to detect seasonal periodicity. Valid tests include ("ocsb", "ch"). Note that the CHTest is very slow for large data. Returns ------- D : int The estimated seasonal differencing term. This is the maximum value of ``D`` such that ``D <= max_D`` and the time series is judged seasonally stationary. If the time series is constant, will return 0. """ if max_D <= 0: raise ValueError('max_D must be a positive integer') # get the test - this validates m internally testfunc = get_callable(test, VALID_STESTS)(m, **kwargs)\ .estimate_seasonal_differencing_term x = column_or_1d(check_array(x, ensure_2d=False, force_all_finite=True, dtype=DTYPE)) if is_constant(x): return 0 D = 0 dodiff = testfunc(x) while dodiff == 1 and D < max_D: D += 1 x = diff(x, lag=m) if is_constant(x): return D dodiff = testfunc(x) return D
python
def nsdiffs(x, m, max_D=2, test='ocsb', **kwargs): if max_D <= 0: raise ValueError('max_D must be a positive integer') # get the test - this validates m internally testfunc = get_callable(test, VALID_STESTS)(m, **kwargs)\ .estimate_seasonal_differencing_term x = column_or_1d(check_array(x, ensure_2d=False, force_all_finite=True, dtype=DTYPE)) if is_constant(x): return 0 D = 0 dodiff = testfunc(x) while dodiff == 1 and D < max_D: D += 1 x = diff(x, lag=m) if is_constant(x): return D dodiff = testfunc(x) return D
[ "def", "nsdiffs", "(", "x", ",", "m", ",", "max_D", "=", "2", ",", "test", "=", "'ocsb'", ",", "*", "*", "kwargs", ")", ":", "if", "max_D", "<=", "0", ":", "raise", "ValueError", "(", "'max_D must be a positive integer'", ")", "# get the test - this valida...
Estimate the seasonal differencing term, ``D``. Perform a test of seasonality for different levels of ``D`` to estimate the number of seasonal differences required to make a given time series stationary. Will select the maximum value of ``D`` for which the time series is judged seasonally stationary by the statistical test. Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array to difference. m : int The number of seasonal periods (i.e., frequency of the time series) max_D : int, optional (default=2) Maximum number of seasonal differences allowed. Must be a positive integer. The estimated value of ``D`` will not exceed ``max_D``. test : str, optional (default='ocsb') Type of unit root test of seasonality to use in order to detect seasonal periodicity. Valid tests include ("ocsb", "ch"). Note that the CHTest is very slow for large data. Returns ------- D : int The estimated seasonal differencing term. This is the maximum value of ``D`` such that ``D <= max_D`` and the time series is judged seasonally stationary. If the time series is constant, will return 0.
[ "Estimate", "the", "seasonal", "differencing", "term", "D", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/utils.py#L60-L116
226,724
tgsmith61591/pmdarima
pmdarima/arima/utils.py
ndiffs
def ndiffs(x, alpha=0.05, test='kpss', max_d=2, **kwargs): """Estimate ARIMA differencing term, ``d``. Perform a test of stationarity for different levels of ``d`` to estimate the number of differences required to make a given time series stationary. Will select the maximum value of ``d`` for which the time series is judged stationary by the statistical test. Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array (time series) to difference. alpha : float, optional (default=0.05) Level of the test. This is the value above below which the P-value will be deemed significant. test : str, optional (default='kpss') Type of unit root test of stationarity to use in order to test the stationarity of the time-series. One of ('kpss', 'adf', 'pp') max_d : int, optional (default=2) Maximum number of non-seasonal differences allowed. Must be a positive integer. The estimated value of ``d`` will not exceed ``max_d``. Returns ------- d : int The estimated differencing term. This is the maximum value of ``d`` such that ``d <= max_d`` and the time series is judged stationary. If the time series is constant, will return 0. References ---------- .. [1] R's auto_arima ndiffs function: https://bit.ly/2Bu8CHN """ if max_d <= 0: raise ValueError('max_d must be a positive integer') # get the test testfunc = get_callable(test, VALID_TESTS)(alpha, **kwargs).should_diff x = column_or_1d(check_array(x, ensure_2d=False, force_all_finite=True, dtype=DTYPE)) # base case, if constant return 0 d = 0 if is_constant(x): return d # get initial diff pval, dodiff = testfunc(x) # if initially NaN, return 0 if np.isnan(pval): return 0 # (d is zero, but this is more explicit to the reader) # Begin loop. while dodiff and d < max_d: d += 1 # do differencing x = diff(x) if is_constant(x): return d # get new result pval, dodiff = testfunc(x) # if it's NaN now, take the last non-null one if np.isnan(pval): return d - 1 # when d >= max_d return d
python
def ndiffs(x, alpha=0.05, test='kpss', max_d=2, **kwargs): if max_d <= 0: raise ValueError('max_d must be a positive integer') # get the test testfunc = get_callable(test, VALID_TESTS)(alpha, **kwargs).should_diff x = column_or_1d(check_array(x, ensure_2d=False, force_all_finite=True, dtype=DTYPE)) # base case, if constant return 0 d = 0 if is_constant(x): return d # get initial diff pval, dodiff = testfunc(x) # if initially NaN, return 0 if np.isnan(pval): return 0 # (d is zero, but this is more explicit to the reader) # Begin loop. while dodiff and d < max_d: d += 1 # do differencing x = diff(x) if is_constant(x): return d # get new result pval, dodiff = testfunc(x) # if it's NaN now, take the last non-null one if np.isnan(pval): return d - 1 # when d >= max_d return d
[ "def", "ndiffs", "(", "x", ",", "alpha", "=", "0.05", ",", "test", "=", "'kpss'", ",", "max_d", "=", "2", ",", "*", "*", "kwargs", ")", ":", "if", "max_d", "<=", "0", ":", "raise", "ValueError", "(", "'max_d must be a positive integer'", ")", "# get th...
Estimate ARIMA differencing term, ``d``. Perform a test of stationarity for different levels of ``d`` to estimate the number of differences required to make a given time series stationary. Will select the maximum value of ``d`` for which the time series is judged stationary by the statistical test. Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array (time series) to difference. alpha : float, optional (default=0.05) Level of the test. This is the value above below which the P-value will be deemed significant. test : str, optional (default='kpss') Type of unit root test of stationarity to use in order to test the stationarity of the time-series. One of ('kpss', 'adf', 'pp') max_d : int, optional (default=2) Maximum number of non-seasonal differences allowed. Must be a positive integer. The estimated value of ``d`` will not exceed ``max_d``. Returns ------- d : int The estimated differencing term. This is the maximum value of ``d`` such that ``d <= max_d`` and the time series is judged stationary. If the time series is constant, will return 0. References ---------- .. [1] R's auto_arima ndiffs function: https://bit.ly/2Bu8CHN
[ "Estimate", "ARIMA", "differencing", "term", "d", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/utils.py#L119-L193
226,725
tgsmith61591/pmdarima
pmdarima/compat/statsmodels.py
bind_df_model
def bind_df_model(model_fit, arima_results): """Set model degrees of freedom. Older versions of statsmodels don't handle this issue. Sets the model degrees of freedom in place if not already present. Parameters ---------- model_fit : ARMA, ARIMA or SARIMAX The fitted model. arima_results : ModelResultsWrapper The results wrapper. """ if not hasattr(arima_results, 'df_model'): df_model = model_fit.k_exog + model_fit.k_trend + \ model_fit.k_ar + model_fit.k_ma + \ model_fit.k_seasonal_ar + model_fit.k_seasonal_ma setattr(arima_results, 'df_model', df_model)
python
def bind_df_model(model_fit, arima_results): if not hasattr(arima_results, 'df_model'): df_model = model_fit.k_exog + model_fit.k_trend + \ model_fit.k_ar + model_fit.k_ma + \ model_fit.k_seasonal_ar + model_fit.k_seasonal_ma setattr(arima_results, 'df_model', df_model)
[ "def", "bind_df_model", "(", "model_fit", ",", "arima_results", ")", ":", "if", "not", "hasattr", "(", "arima_results", ",", "'df_model'", ")", ":", "df_model", "=", "model_fit", ".", "k_exog", "+", "model_fit", ".", "k_trend", "+", "model_fit", ".", "k_ar",...
Set model degrees of freedom. Older versions of statsmodels don't handle this issue. Sets the model degrees of freedom in place if not already present. Parameters ---------- model_fit : ARMA, ARIMA or SARIMAX The fitted model. arima_results : ModelResultsWrapper The results wrapper.
[ "Set", "model", "degrees", "of", "freedom", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/compat/statsmodels.py#L15-L33
226,726
tgsmith61591/pmdarima
pmdarima/compat/matplotlib.py
get_compatible_pyplot
def get_compatible_pyplot(backend=None, debug=True): """Make the backend of MPL compatible. In Travis Mac distributions, python is not installed as a framework. This means that using the TkAgg backend is the best solution (so it doesn't try to use the mac OS backend by default). Parameters ---------- backend : str, optional (default="TkAgg") The backend to default to. debug : bool, optional (default=True) Whether to log the existing backend to stderr. """ import matplotlib # If the backend provided is None, just default to # what's already being used. existing_backend = matplotlib.get_backend() if backend is not None: # Can this raise?... matplotlib.use(backend) # Print out the new backend if debug: sys.stderr.write("Currently using '%s' MPL backend, " "switching to '%s' backend%s" % (existing_backend, backend, os.linesep)) # If backend is not set via env variable, but debug is elif debug: sys.stderr.write("Using '%s' MPL backend%s" % (existing_backend, os.linesep)) from matplotlib import pyplot as plt return plt
python
def get_compatible_pyplot(backend=None, debug=True): import matplotlib # If the backend provided is None, just default to # what's already being used. existing_backend = matplotlib.get_backend() if backend is not None: # Can this raise?... matplotlib.use(backend) # Print out the new backend if debug: sys.stderr.write("Currently using '%s' MPL backend, " "switching to '%s' backend%s" % (existing_backend, backend, os.linesep)) # If backend is not set via env variable, but debug is elif debug: sys.stderr.write("Using '%s' MPL backend%s" % (existing_backend, os.linesep)) from matplotlib import pyplot as plt return plt
[ "def", "get_compatible_pyplot", "(", "backend", "=", "None", ",", "debug", "=", "True", ")", ":", "import", "matplotlib", "# If the backend provided is None, just default to", "# what's already being used.", "existing_backend", "=", "matplotlib", ".", "get_backend", "(", ...
Make the backend of MPL compatible. In Travis Mac distributions, python is not installed as a framework. This means that using the TkAgg backend is the best solution (so it doesn't try to use the mac OS backend by default). Parameters ---------- backend : str, optional (default="TkAgg") The backend to default to. debug : bool, optional (default=True) Whether to log the existing backend to stderr.
[ "Make", "the", "backend", "of", "MPL", "compatible", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/compat/matplotlib.py#L17-L53
226,727
tgsmith61591/pmdarima
pmdarima/arima/auto.py
_return_wrapper
def _return_wrapper(fits, return_all, start, trace): """If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all. """ # make sure it's an iterable if not is_iterable(fits): fits = [fits] # whether to print the final runtime if trace: print('Total fit time: %.3f seconds' % (time.time() - start)) # which to return? if not all, then first index (assume sorted) if not return_all: return fits[0] return fits
python
def _return_wrapper(fits, return_all, start, trace): # make sure it's an iterable if not is_iterable(fits): fits = [fits] # whether to print the final runtime if trace: print('Total fit time: %.3f seconds' % (time.time() - start)) # which to return? if not all, then first index (assume sorted) if not return_all: return fits[0] return fits
[ "def", "_return_wrapper", "(", "fits", ",", "return_all", ",", "start", ",", "trace", ")", ":", "# make sure it's an iterable", "if", "not", "is_iterable", "(", "fits", ")", ":", "fits", "=", "[", "fits", "]", "# whether to print the final runtime", "if", "trace...
If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all.
[ "If", "the", "user", "wants", "to", "get", "all", "of", "the", "models", "back", "this", "will", "return", "a", "list", "of", "the", "ARIMA", "models", "otherwise", "it", "will", "just", "return", "the", "model", ".", "If", "this", "is", "called", "fro...
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/auto.py#L565-L593
226,728
tgsmith61591/pmdarima
pmdarima/arima/auto.py
AutoARIMA.fit
def fit(self, y, exogenous=None, **fit_args): """Fit the auto-arima estimator Fit an AutoARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. **fit_args : dict or kwargs Any keyword arguments to pass to the auto-arima function. """ self.model_ = auto_arima( y, exogenous=exogenous, start_p=self.start_p, d=self.d, start_q=self.start_q, max_p=self.max_p, max_d=self.max_d, max_q=self.max_q, start_P=self.start_P, D=self.D, start_Q=self.start_Q, max_P=self.max_P, max_D=self.max_D, max_Q=self.max_Q, max_order=self.max_order, m=self.m, seasonal=self.seasonal, stationary=self.stationary, information_criterion=self.information_criterion, alpha=self.alpha, test=self.test, seasonal_test=self.seasonal_test, stepwise=self.stepwise, n_jobs=self.n_jobs, start_params=self.start_params, trend=self.trend, method=self.method, transparams=self.transparams, solver=self.solver, maxiter=self.maxiter, disp=self.disp, callback=self.callback, offset_test_args=self.offset_test_args, seasonal_test_args=self.seasonal_test_args, suppress_warnings=self.suppress_warnings, error_action=self.error_action, trace=self.trace, random=self.random, random_state=self.random_state, n_fits=self.n_fits, return_valid_fits=False, # only return ONE 
out_of_sample_size=self.out_of_sample_size, scoring=self.scoring, scoring_args=self.scoring_args, with_intercept=self.with_intercept, **fit_args) return self
python
def fit(self, y, exogenous=None, **fit_args): self.model_ = auto_arima( y, exogenous=exogenous, start_p=self.start_p, d=self.d, start_q=self.start_q, max_p=self.max_p, max_d=self.max_d, max_q=self.max_q, start_P=self.start_P, D=self.D, start_Q=self.start_Q, max_P=self.max_P, max_D=self.max_D, max_Q=self.max_Q, max_order=self.max_order, m=self.m, seasonal=self.seasonal, stationary=self.stationary, information_criterion=self.information_criterion, alpha=self.alpha, test=self.test, seasonal_test=self.seasonal_test, stepwise=self.stepwise, n_jobs=self.n_jobs, start_params=self.start_params, trend=self.trend, method=self.method, transparams=self.transparams, solver=self.solver, maxiter=self.maxiter, disp=self.disp, callback=self.callback, offset_test_args=self.offset_test_args, seasonal_test_args=self.seasonal_test_args, suppress_warnings=self.suppress_warnings, error_action=self.error_action, trace=self.trace, random=self.random, random_state=self.random_state, n_fits=self.n_fits, return_valid_fits=False, # only return ONE out_of_sample_size=self.out_of_sample_size, scoring=self.scoring, scoring_args=self.scoring_args, with_intercept=self.with_intercept, **fit_args) return self
[ "def", "fit", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "*", "*", "fit_args", ")", ":", "self", ".", "model_", "=", "auto_arima", "(", "y", ",", "exogenous", "=", "exogenous", ",", "start_p", "=", "self", ".", "start_p", ",", "d", ...
Fit the auto-arima estimator Fit an AutoARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. **fit_args : dict or kwargs Any keyword arguments to pass to the auto-arima function.
[ "Fit", "the", "auto", "-", "arima", "estimator" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/auto.py#L101-L150
226,729
tgsmith61591/pmdarima
pmdarima/utils/visualization.py
autocorr_plot
def autocorr_plot(series, show=True): """Plot a series' auto-correlation. A wrapper method for the Pandas ``autocorrelation_plot`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> autocorr_plot([1, 2, 3], False) # doctest: +SKIP <matplotlib.axes._subplots.AxesSubplot object at 0x127f41dd8> Returns ------- res : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object. """ _err_for_no_mpl() res = ap(series) return _show_or_return(res, show)
python
def autocorr_plot(series, show=True): _err_for_no_mpl() res = ap(series) return _show_or_return(res, show)
[ "def", "autocorr_plot", "(", "series", ",", "show", "=", "True", ")", ":", "_err_for_no_mpl", "(", ")", "res", "=", "ap", "(", "series", ")", "return", "_show_or_return", "(", "res", ",", "show", ")" ]
Plot a series' auto-correlation. A wrapper method for the Pandas ``autocorrelation_plot`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> autocorr_plot([1, 2, 3], False) # doctest: +SKIP <matplotlib.axes._subplots.AxesSubplot object at 0x127f41dd8> Returns ------- res : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object.
[ "Plot", "a", "series", "auto", "-", "correlation", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/visualization.py#L61-L94
226,730
tgsmith61591/pmdarima
pmdarima/utils/visualization.py
plot_acf
def plot_acf(series, ax=None, lags=None, alpha=None, use_vlines=True, unbiased=False, fft=True, title='Autocorrelation', zero=True, vlines_kwargs=None, show=True, **kwargs): """Plot a series' auto-correlation as a line plot. A wrapper method for the statsmodels ``plot_acf`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. ax : Matplotlib AxesSubplot instance, optional If given, this subplot is used to plot in instead of a new figure being created. lags : int, array-like or None, optional (default=None) int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used. alpha : scalar, optional (default=None) If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted. use_vlines : bool, optional (default=True) If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg. unbiased : bool, optional (default=False) If True, then denominators for autocovariance are n-k, otherwise n fft : bool, optional (default=True) If True, computes the ACF via FFT. title : str, optional (default='Autocorrelation') Title to place on plot. Default is 'Autocorrelation' zero : bool, optional (default=True) Flag indicating whether to include the 0-lag autocorrelation. Default is True. vlines_kwargs : dict, optional (default=None) Optional dictionary of keyword arguments that are passed to vlines. show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. 
**kwargs : kwargs, optional Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> plot_acf([1, 2, 3], show=False) # doctest: +SKIP <matplotlib.figure.Figure object at 0x122fab4e0> Returns ------- plt : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object. """ _err_for_no_mpl() res = pacf(x=series, ax=ax, lags=lags, alpha=alpha, use_vlines=use_vlines, unbiased=unbiased, fft=fft, title=title, zero=zero, vlines_kwargs=vlines_kwargs, **kwargs) return _show_or_return(res, show)
python
def plot_acf(series, ax=None, lags=None, alpha=None, use_vlines=True, unbiased=False, fft=True, title='Autocorrelation', zero=True, vlines_kwargs=None, show=True, **kwargs): _err_for_no_mpl() res = pacf(x=series, ax=ax, lags=lags, alpha=alpha, use_vlines=use_vlines, unbiased=unbiased, fft=fft, title=title, zero=zero, vlines_kwargs=vlines_kwargs, **kwargs) return _show_or_return(res, show)
[ "def", "plot_acf", "(", "series", ",", "ax", "=", "None", ",", "lags", "=", "None", ",", "alpha", "=", "None", ",", "use_vlines", "=", "True", ",", "unbiased", "=", "False", ",", "fft", "=", "True", ",", "title", "=", "'Autocorrelation'", ",", "zero"...
Plot a series' auto-correlation as a line plot. A wrapper method for the statsmodels ``plot_acf`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. ax : Matplotlib AxesSubplot instance, optional If given, this subplot is used to plot in instead of a new figure being created. lags : int, array-like or None, optional (default=None) int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used. alpha : scalar, optional (default=None) If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted. use_vlines : bool, optional (default=True) If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg. unbiased : bool, optional (default=False) If True, then denominators for autocovariance are n-k, otherwise n fft : bool, optional (default=True) If True, computes the ACF via FFT. title : str, optional (default='Autocorrelation') Title to place on plot. Default is 'Autocorrelation' zero : bool, optional (default=True) Flag indicating whether to include the 0-lag autocorrelation. Default is True. vlines_kwargs : dict, optional (default=None) Optional dictionary of keyword arguments that are passed to vlines. show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. **kwargs : kwargs, optional Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). 
To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> plot_acf([1, 2, 3], show=False) # doctest: +SKIP <matplotlib.figure.Figure object at 0x122fab4e0> Returns ------- plt : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object.
[ "Plot", "a", "series", "auto", "-", "correlation", "as", "a", "line", "plot", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/visualization.py#L97-L175
226,731
tgsmith61591/pmdarima
pmdarima/utils/visualization.py
plot_pacf
def plot_pacf(series, ax=None, lags=None, alpha=None, method='yw', use_vlines=True, title='Partial Autocorrelation', zero=True, vlines_kwargs=None, show=True, **kwargs): """Plot a series' partial auto-correlation as a line plot. A wrapper method for the statsmodels ``plot_pacf`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. ax : Matplotlib AxesSubplot instance, optional If given, this subplot is used to plot in instead of a new figure being created. lags : int, array-like or None, optional (default=None) int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used. alpha : scalar, optional (default=None) If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted. method : str, optional (default='yw') Specifies which method for the calculations to use. One of {'ywunbiased', 'ywmle', 'ols', 'ld', 'ldb', 'ldunbiased', 'ldbiased'}: - yw or ywunbiased : yule walker with bias correction in denominator for acovf. Default. - ywm or ywmle : yule walker without bias correction - ols - regression of time series on lags of it and on constant - ld or ldunbiased : Levinson-Durbin recursion with bias correction - ldb or ldbiased : Levinson-Durbin recursion without bias correction use_vlines : bool, optional (default=True) If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg. title : str, optional (default='Partial Autocorrelation') Title to place on plot. Default is 'Partial Autocorrelation' zero : bool, optional (default=True) Flag indicating whether to include the 0-lag autocorrelation. 
Default is True. vlines_kwargs : dict, optional (default=None) Optional dictionary of keyword arguments that are passed to vlines. show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. **kwargs : kwargs, optional Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> plot_pacf([1, 2, 3, 4], show=False) # doctest: +SKIP <matplotlib.figure.Figure object at 0x129df1630> Returns ------- plt : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object. """ _err_for_no_mpl() res = ppacf(x=series, ax=ax, lags=lags, alpha=alpha, method=method, use_vlines=use_vlines, title=title, zero=zero, vlines_kwargs=vlines_kwargs, **kwargs) return _show_or_return(res, show)
python
def plot_pacf(series, ax=None, lags=None, alpha=None, method='yw', use_vlines=True, title='Partial Autocorrelation', zero=True, vlines_kwargs=None, show=True, **kwargs): _err_for_no_mpl() res = ppacf(x=series, ax=ax, lags=lags, alpha=alpha, method=method, use_vlines=use_vlines, title=title, zero=zero, vlines_kwargs=vlines_kwargs, **kwargs) return _show_or_return(res, show)
[ "def", "plot_pacf", "(", "series", ",", "ax", "=", "None", ",", "lags", "=", "None", ",", "alpha", "=", "None", ",", "method", "=", "'yw'", ",", "use_vlines", "=", "True", ",", "title", "=", "'Partial Autocorrelation'", ",", "zero", "=", "True", ",", ...
Plot a series' partial auto-correlation as a line plot. A wrapper method for the statsmodels ``plot_pacf`` method. Parameters ---------- series : array-like, shape=(n_samples,) The series or numpy array for which to plot an auto-correlation. ax : Matplotlib AxesSubplot instance, optional If given, this subplot is used to plot in instead of a new figure being created. lags : int, array-like or None, optional (default=None) int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used. alpha : scalar, optional (default=None) If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted. method : str, optional (default='yw') Specifies which method for the calculations to use. One of {'ywunbiased', 'ywmle', 'ols', 'ld', 'ldb', 'ldunbiased', 'ldbiased'}: - yw or ywunbiased : yule walker with bias correction in denominator for acovf. Default. - ywm or ywmle : yule walker without bias correction - ols - regression of time series on lags of it and on constant - ld or ldunbiased : Levinson-Durbin recursion with bias correction - ldb or ldbiased : Levinson-Durbin recursion without bias correction use_vlines : bool, optional (default=True) If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg. title : str, optional (default='Partial Autocorrelation') Title to place on plot. Default is 'Partial Autocorrelation' zero : bool, optional (default=True) Flag indicating whether to include the 0-lag autocorrelation. Default is True. vlines_kwargs : dict, optional (default=None) Optional dictionary of keyword arguments that are passed to vlines. 
show : bool, optional (default=True) Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead. **kwargs : kwargs, optional Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions. Notes ----- This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``. Examples -------- >>> plot_pacf([1, 2, 3, 4], show=False) # doctest: +SKIP <matplotlib.figure.Figure object at 0x129df1630> Returns ------- plt : Axis or None If ``show`` is True, does not return anything. If False, returns the Axis object.
[ "Plot", "a", "series", "partial", "auto", "-", "correlation", "as", "a", "line", "plot", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/visualization.py#L178-L261
226,732
tgsmith61591/pmdarima
pmdarima/datasets/airpassengers.py
load_airpassengers
def load_airpassengers(as_series=False): """Monthly airline passengers. The classic Box & Jenkins airline data. Monthly totals of international airline passengers, 1949 to 1960. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The time series vector. Examples -------- >>> from pmdarima.datasets import load_airpassengers >>> load_airpassengers() # doctest: +SKIP np.array([ 112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118, 115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140, 145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166, 171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194, 196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201, 204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229, 242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278, 284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306, 315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336, 340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337, 360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405, 417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432]) >>> load_airpassengers(True).head() 0 112.0 1 118.0 2 132.0 3 129.0 4 121.0 dtype: float64 Notes ----- This is monthly data, so *m* should be set to 12 when using in a seasonal context. References ---------- .. [1] Box, G. E. P., Jenkins, G. M. and Reinsel, G. C. (1976) "Time Series Analysis, Forecasting and Control. Third Edition." Holden-Day. Series G. 
""" rslt = np.array([ 112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118, 115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140, 145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166, 171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194, 196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201, 204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229, 242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278, 284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306, 315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336, 340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337, 360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405, 417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432 ]).astype(np.float64) if as_series: return pd.Series(rslt) return rslt
python
def load_airpassengers(as_series=False): rslt = np.array([ 112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118, 115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140, 145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166, 171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194, 196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201, 204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229, 242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278, 284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306, 315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336, 340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337, 360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405, 417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432 ]).astype(np.float64) if as_series: return pd.Series(rslt) return rslt
[ "def", "load_airpassengers", "(", "as_series", "=", "False", ")", ":", "rslt", "=", "np", ".", "array", "(", "[", "112", ",", "118", ",", "132", ",", "129", ",", "121", ",", "135", ",", "148", ",", "148", ",", "136", ",", "119", ",", "104", ","...
Monthly airline passengers. The classic Box & Jenkins airline data. Monthly totals of international airline passengers, 1949 to 1960. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The time series vector. Examples -------- >>> from pmdarima.datasets import load_airpassengers >>> load_airpassengers() # doctest: +SKIP np.array([ 112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118, 115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140, 145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166, 171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194, 196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201, 204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229, 242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278, 284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306, 315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336, 340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337, 360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405, 417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432]) >>> load_airpassengers(True).head() 0 112.0 1 118.0 2 132.0 3 129.0 4 121.0 dtype: float64 Notes ----- This is monthly data, so *m* should be set to 12 when using in a seasonal context. References ---------- .. [1] Box, G. E. P., Jenkins, G. M. and Reinsel, G. C. (1976) "Time Series Analysis, Forecasting and Control. Third Edition." Holden-Day. Series G.
[ "Monthly", "airline", "passengers", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/datasets/airpassengers.py#L13-L84
226,733
tgsmith61591/pmdarima
pmdarima/utils/metaestimators.py
if_has_delegate
def if_has_delegate(delegate): """Wrap a delegated instance attribute function. Creates a decorator for methods that are delegated in the presence of a results wrapper. This enables duck-typing by ``hasattr`` returning True according to the sub-estimator. This function was adapted from scikit-learn, which defines ``if_delegate_has_method``, but operates differently by injecting methods not based on method presence, but by delegate presence. Examples -------- >>> from pmdarima.utils.metaestimators import if_has_delegate >>> >>> class A(object): ... @if_has_delegate('d') ... def func(self): ... return True >>> >>> a = A() >>> # the delegate does not exist yet >>> assert not hasattr(a, 'func') >>> # inject the attribute >>> a.d = None >>> assert hasattr(a, 'func') and a.func() Parameters ---------- delegate : string, list of strings or tuple of strings Name of the sub-estimator that can be accessed as an attribute of the base object. If a list or a tuple of names are provided, the first sub-estimator that is an attribute of the base object will be used. """ if isinstance(delegate, list): delegate = tuple(delegate) if not isinstance(delegate, tuple): delegate = (delegate,) return lambda fn: _IffHasDelegate(fn, delegate)
python
def if_has_delegate(delegate): if isinstance(delegate, list): delegate = tuple(delegate) if not isinstance(delegate, tuple): delegate = (delegate,) return lambda fn: _IffHasDelegate(fn, delegate)
[ "def", "if_has_delegate", "(", "delegate", ")", ":", "if", "isinstance", "(", "delegate", ",", "list", ")", ":", "delegate", "=", "tuple", "(", "delegate", ")", "if", "not", "isinstance", "(", "delegate", ",", "tuple", ")", ":", "delegate", "=", "(", "...
Wrap a delegated instance attribute function. Creates a decorator for methods that are delegated in the presence of a results wrapper. This enables duck-typing by ``hasattr`` returning True according to the sub-estimator. This function was adapted from scikit-learn, which defines ``if_delegate_has_method``, but operates differently by injecting methods not based on method presence, but by delegate presence. Examples -------- >>> from pmdarima.utils.metaestimators import if_has_delegate >>> >>> class A(object): ... @if_has_delegate('d') ... def func(self): ... return True >>> >>> a = A() >>> # the delegate does not exist yet >>> assert not hasattr(a, 'func') >>> # inject the attribute >>> a.d = None >>> assert hasattr(a, 'func') and a.func() Parameters ---------- delegate : string, list of strings or tuple of strings Name of the sub-estimator that can be accessed as an attribute of the base object. If a list or a tuple of names are provided, the first sub-estimator that is an attribute of the base object will be used.
[ "Wrap", "a", "delegated", "instance", "attribute", "function", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/metaestimators.py#L61-L100
226,734
tgsmith61591/pmdarima
pmdarima/datasets/heartrate.py
load_heartrate
def load_heartrate(as_series=False): """Uniform heart-rate data. A sample of heartrate data borrowed from an `MIT database <http://ecg.mit.edu/time-series/>`_. The sample consists of 150 evenly spaced (0.5 seconds) heartrate measurements. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The heartrate vector. Examples -------- >>> from pmdarima.datasets import load_heartrate >>> load_heartrate() array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726, 86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605, 93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904 , 98.3437, 98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471, 97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389 , 95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026, 96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603, 93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211, 95.8501, 94.6703, 93.0993, 91.972 , 91.7821, 91.7911, 90.807 , 89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238, 93.243 , 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952, 91.771 , 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716, 94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819, 93.4731, 92.7997, 92.963 , 92.6996, 91.9648, 91.2417, 91.9312, 93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065, 92.1588, 93.6376, 94.899 , 95.1592, 95.2415, 95.5414, 95.0971, 94.528 , 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947, 97.4291, 98.1767, 97.0148, 96.044 , 95.9581, 96.4814, 96.5211, 95.3629, 93.5741, 92.077 , 90.4094, 90.1751, 91.3312, 91.2883, 89.0592, 87.052 , 86.6226, 85.7889, 85.6348, 85.3911, 83.8064, 82.8729, 82.6266, 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 ]) >>> load_heartrate(True).head() 0 84.2697 1 84.2697 2 84.0619 3 85.6542 4 87.2093 dtype: float64 
References ---------- .. [1] Goldberger AL, Rigney DR. Nonlinear dynamics at the bedside. In: Glass L, Hunter P, McCulloch A, eds. Theory of Heart: Biomechanics, Biophysics, and Nonlinear Dynamics of Cardiac Function. New York: Springer-Verlag, 1991, pp. 583-605. """ rslt = np.array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726, 86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605, 93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904, 98.3437, 98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471, 97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389, 95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026, 96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603, 93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211, 95.8501, 94.6703, 93.0993, 91.972, 91.7821, 91.7911, 90.807, 89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238, 93.243, 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952, 91.771, 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716, 94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819, 93.4731, 92.7997, 92.963, 92.6996, 91.9648, 91.2417, 91.9312, 93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065, 92.1588, 93.6376, 94.899, 95.1592, 95.2415, 95.5414, 95.0971, 94.528, 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947, 97.4291, 98.1767, 97.0148, 96.044, 95.9581, 96.4814, 96.5211, 95.3629, 93.5741, 92.077, 90.4094, 90.1751, 91.3312, 91.2883, 89.0592, 87.052, 86.6226, 85.7889, 85.6348, 85.3911, 83.8064, 82.8729, 82.6266, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645]) if as_series: return pd.Series(rslt) return rslt
python
def load_heartrate(as_series=False): rslt = np.array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726, 86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605, 93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904, 98.3437, 98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471, 97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389, 95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026, 96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603, 93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211, 95.8501, 94.6703, 93.0993, 91.972, 91.7821, 91.7911, 90.807, 89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238, 93.243, 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952, 91.771, 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716, 94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819, 93.4731, 92.7997, 92.963, 92.6996, 91.9648, 91.2417, 91.9312, 93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065, 92.1588, 93.6376, 94.899, 95.1592, 95.2415, 95.5414, 95.0971, 94.528, 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947, 97.4291, 98.1767, 97.0148, 96.044, 95.9581, 96.4814, 96.5211, 95.3629, 93.5741, 92.077, 90.4094, 90.1751, 91.3312, 91.2883, 89.0592, 87.052, 86.6226, 85.7889, 85.6348, 85.3911, 83.8064, 82.8729, 82.6266, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645]) if as_series: return pd.Series(rslt) return rslt
[ "def", "load_heartrate", "(", "as_series", "=", "False", ")", ":", "rslt", "=", "np", ".", "array", "(", "[", "84.2697", ",", "84.2697", ",", "84.0619", ",", "85.6542", ",", "87.2093", ",", "87.1246", ",", "86.8726", ",", "86.7052", ",", "87.5899", ","...
Uniform heart-rate data. A sample of heartrate data borrowed from an `MIT database <http://ecg.mit.edu/time-series/>`_. The sample consists of 150 evenly spaced (0.5 seconds) heartrate measurements. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The heartrate vector. Examples -------- >>> from pmdarima.datasets import load_heartrate >>> load_heartrate() array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726, 86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605, 93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904 , 98.3437, 98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471, 97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389 , 95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026, 96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603, 93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211, 95.8501, 94.6703, 93.0993, 91.972 , 91.7821, 91.7911, 90.807 , 89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238, 93.243 , 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952, 91.771 , 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716, 94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819, 93.4731, 92.7997, 92.963 , 92.6996, 91.9648, 91.2417, 91.9312, 93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065, 92.1588, 93.6376, 94.899 , 95.1592, 95.2415, 95.5414, 95.0971, 94.528 , 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947, 97.4291, 98.1767, 97.0148, 96.044 , 95.9581, 96.4814, 96.5211, 95.3629, 93.5741, 92.077 , 90.4094, 90.1751, 91.3312, 91.2883, 89.0592, 87.052 , 86.6226, 85.7889, 85.6348, 85.3911, 83.8064, 82.8729, 82.6266, 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 ]) >>> load_heartrate(True).head() 0 84.2697 1 84.2697 2 84.0619 3 85.6542 4 87.2093 dtype: float64 References ---------- .. 
[1] Goldberger AL, Rigney DR. Nonlinear dynamics at the bedside. In: Glass L, Hunter P, McCulloch A, eds. Theory of Heart: Biomechanics, Biophysics, and Nonlinear Dynamics of Cardiac Function. New York: Springer-Verlag, 1991, pp. 583-605.
[ "Uniform", "heart", "-", "rate", "data", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/datasets/heartrate.py#L13-L102
226,735
tgsmith61591/pmdarima
benchmarks/benchmark_funcs.py
benchmark_is_constant
def benchmark_is_constant(): """This benchmarks the "is_constant" function from ``pmdarima.arima.utils`` This was added in 0.6.2. """ # WINNER! def is_const1(x): """This is the version in Pyramid 0.6.2. Parameters ---------- x : np.ndarray This is the array. """ return (x == x[0]).all() def is_const2(x): """This should ostensibly only take O(N) rather than O(2N) like its predecessor. But we'll see... Parameters ---------- x : np.ndarray This is the array. """ return np.unique(x).shape[0] == 1 x = np.random.choice(np.arange(10), 1000000, replace=True) _do_time(is_const1, 25, x) _do_time(is_const2, 25, x)
python
def benchmark_is_constant(): # WINNER! def is_const1(x): """This is the version in Pyramid 0.6.2. Parameters ---------- x : np.ndarray This is the array. """ return (x == x[0]).all() def is_const2(x): """This should ostensibly only take O(N) rather than O(2N) like its predecessor. But we'll see... Parameters ---------- x : np.ndarray This is the array. """ return np.unique(x).shape[0] == 1 x = np.random.choice(np.arange(10), 1000000, replace=True) _do_time(is_const1, 25, x) _do_time(is_const2, 25, x)
[ "def", "benchmark_is_constant", "(", ")", ":", "# WINNER!", "def", "is_const1", "(", "x", ")", ":", "\"\"\"This is the version in Pyramid 0.6.2.\n\n Parameters\n ----------\n x : np.ndarray\n This is the array.\n \"\"\"", "return", "(", "x", "==...
This benchmarks the "is_constant" function from ``pmdarima.arima.utils`` This was added in 0.6.2.
[ "This", "benchmarks", "the", "is_constant", "function", "from", "pmdarima", ".", "arima", ".", "utils", "This", "was", "added", "in", "0", ".", "6", ".", "2", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/benchmarks/benchmark_funcs.py#L25-L53
226,736
tgsmith61591/pmdarima
pmdarima/datasets/woolyrnq.py
load_woolyrnq
def load_woolyrnq(as_series=False): """Quarterly production of woollen yarn in Australia. This time-series records the quarterly production (in tonnes) of woollen yarn in Australia between Mar 1965 and Sep 1994. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If True, the index will be set to the observed years/quarters. If False, will return a 1d numpy array. Examples -------- >>> from pmdarima.datasets import load_woolyrnq >>> load_woolyrnq() array([6172, 6709, 6633, 6660, 6786, 6800, 6730, 6765, 6720, 7133, 6946, 7095, 7047, 6757, 6915, 6921, 7064, 7206, 7190, 7402, 7819, 7300, 7105, 7259, 7001, 7475, 6840, 7061, 5845, 7529, 7819, 6943, 5714, 6556, 7045, 5947, 5463, 6127, 5540, 4235, 3324, 4793, 5906, 5834, 5240, 5458, 5505, 5002, 3999, 4826, 5318, 4681, 4442, 5305, 5466, 4995, 4573, 5081, 5696, 5079, 4373, 4986, 5341, 4800, 4161, 5007, 5464, 5127, 4240, 5338, 5129, 4437, 3642, 4602, 5524, 4895, 4380, 5186, 6080, 5588, 5009, 5663, 6540, 6262, 5169, 5819, 6339, 5981, 4766, 5976, 6590, 5590, 5135, 5762, 6077, 5882, 4247, 5264, 5146, 4868, 4329, 4869, 5127, 4868, 3827, 4987, 5222, 4928, 3930, 4469, 4954, 4752, 3888, 4588, 5309, 4732, 4837, 6135, 6396]) >>> load_woolyrnq(True).head() Q1 1965 6172 Q2 1965 6709 Q3 1965 6633 Q4 1965 6660 Q1 1966 6786 dtype: int64 Notes ----- This is quarterly data, so *m* should be set to 4 when using in a seasonal context. References ---------- .. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/woolyrnq # noqa: E501 Returns ------- rslt : array-like, shape=(n_samples,) The woolyrnq dataset. There are 119 observations. 
""" rslt = np.array([ 6172, 6709, 6633, 6660, 6786, 6800, 6730, 6765, 6720, 7133, 6946, 7095, 7047, 6757, 6915, 6921, 7064, 7206, 7190, 7402, 7819, 7300, 7105, 7259, 7001, 7475, 6840, 7061, 5845, 7529, 7819, 6943, 5714, 6556, 7045, 5947, 5463, 6127, 5540, 4235, 3324, 4793, 5906, 5834, 5240, 5458, 5505, 5002, 3999, 4826, 5318, 4681, 4442, 5305, 5466, 4995, 4573, 5081, 5696, 5079, 4373, 4986, 5341, 4800, 4161, 5007, 5464, 5127, 4240, 5338, 5129, 4437, 3642, 4602, 5524, 4895, 4380, 5186, 6080, 5588, 5009, 5663, 6540, 6262, 5169, 5819, 6339, 5981, 4766, 5976, 6590, 5590, 5135, 5762, 6077, 5882, 4247, 5264, 5146, 4868, 4329, 4869, 5127, 4868, 3827, 4987, 5222, 4928, 3930, 4469, 4954, 4752, 3888, 4588, 5309, 4732, 4837, 6135, 6396]) if not as_series: return rslt # Otherwise we want a series and have to cleverly create the index # (with quarters, and we don't want Q4 in 1994) index = [ "Q%i %i" % (i + 1, year) for year in range(1965, 1995) for i in range(4) ][:-1] # trim off the last one. return pd.Series(rslt, index=index)
python
def load_woolyrnq(as_series=False): rslt = np.array([ 6172, 6709, 6633, 6660, 6786, 6800, 6730, 6765, 6720, 7133, 6946, 7095, 7047, 6757, 6915, 6921, 7064, 7206, 7190, 7402, 7819, 7300, 7105, 7259, 7001, 7475, 6840, 7061, 5845, 7529, 7819, 6943, 5714, 6556, 7045, 5947, 5463, 6127, 5540, 4235, 3324, 4793, 5906, 5834, 5240, 5458, 5505, 5002, 3999, 4826, 5318, 4681, 4442, 5305, 5466, 4995, 4573, 5081, 5696, 5079, 4373, 4986, 5341, 4800, 4161, 5007, 5464, 5127, 4240, 5338, 5129, 4437, 3642, 4602, 5524, 4895, 4380, 5186, 6080, 5588, 5009, 5663, 6540, 6262, 5169, 5819, 6339, 5981, 4766, 5976, 6590, 5590, 5135, 5762, 6077, 5882, 4247, 5264, 5146, 4868, 4329, 4869, 5127, 4868, 3827, 4987, 5222, 4928, 3930, 4469, 4954, 4752, 3888, 4588, 5309, 4732, 4837, 6135, 6396]) if not as_series: return rslt # Otherwise we want a series and have to cleverly create the index # (with quarters, and we don't want Q4 in 1994) index = [ "Q%i %i" % (i + 1, year) for year in range(1965, 1995) for i in range(4) ][:-1] # trim off the last one. return pd.Series(rslt, index=index)
[ "def", "load_woolyrnq", "(", "as_series", "=", "False", ")", ":", "rslt", "=", "np", ".", "array", "(", "[", "6172", ",", "6709", ",", "6633", ",", "6660", ",", "6786", ",", "6800", ",", "6730", ",", "6765", ",", "6720", ",", "7133", ",", "6946",...
Quarterly production of woollen yarn in Australia. This time-series records the quarterly production (in tonnes) of woollen yarn in Australia between Mar 1965 and Sep 1994. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If True, the index will be set to the observed years/quarters. If False, will return a 1d numpy array. Examples -------- >>> from pmdarima.datasets import load_woolyrnq >>> load_woolyrnq() array([6172, 6709, 6633, 6660, 6786, 6800, 6730, 6765, 6720, 7133, 6946, 7095, 7047, 6757, 6915, 6921, 7064, 7206, 7190, 7402, 7819, 7300, 7105, 7259, 7001, 7475, 6840, 7061, 5845, 7529, 7819, 6943, 5714, 6556, 7045, 5947, 5463, 6127, 5540, 4235, 3324, 4793, 5906, 5834, 5240, 5458, 5505, 5002, 3999, 4826, 5318, 4681, 4442, 5305, 5466, 4995, 4573, 5081, 5696, 5079, 4373, 4986, 5341, 4800, 4161, 5007, 5464, 5127, 4240, 5338, 5129, 4437, 3642, 4602, 5524, 4895, 4380, 5186, 6080, 5588, 5009, 5663, 6540, 6262, 5169, 5819, 6339, 5981, 4766, 5976, 6590, 5590, 5135, 5762, 6077, 5882, 4247, 5264, 5146, 4868, 4329, 4869, 5127, 4868, 3827, 4987, 5222, 4928, 3930, 4469, 4954, 4752, 3888, 4588, 5309, 4732, 4837, 6135, 6396]) >>> load_woolyrnq(True).head() Q1 1965 6172 Q2 1965 6709 Q3 1965 6633 Q4 1965 6660 Q1 1966 6786 dtype: int64 Notes ----- This is quarterly data, so *m* should be set to 4 when using in a seasonal context. References ---------- .. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/woolyrnq # noqa: E501 Returns ------- rslt : array-like, shape=(n_samples,) The woolyrnq dataset. There are 119 observations.
[ "Quarterly", "production", "of", "woollen", "yarn", "in", "Australia", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/datasets/woolyrnq.py#L17-L110
226,737
tgsmith61591/pmdarima
pmdarima/utils/array.py
as_series
def as_series(x): """Cast as pandas Series. Cast an iterable to a Pandas Series object. Note that the index will simply be a positional ``arange`` and cannot be set in this function. Parameters ---------- x : array-like, shape=(n_samples,) The 1d array on which to compute the auto correlation. Examples -------- >>> as_series([1, 2, 3]) 0 1 1 2 2 3 dtype: int64 >>> as_series(as_series((1, 2, 3))) 0 1 1 2 2 3 dtype: int64 >>> import pandas as pd >>> as_series(pd.Series([4, 5, 6], index=['a', 'b', 'c'])) a 4 b 5 c 6 dtype: int64 Returns ------- s : pd.Series A pandas Series object. """ if isinstance(x, pd.Series): return x return pd.Series(column_or_1d(x))
python
def as_series(x): if isinstance(x, pd.Series): return x return pd.Series(column_or_1d(x))
[ "def", "as_series", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "pd", ".", "Series", ")", ":", "return", "x", "return", "pd", ".", "Series", "(", "column_or_1d", "(", "x", ")", ")" ]
Cast as pandas Series. Cast an iterable to a Pandas Series object. Note that the index will simply be a positional ``arange`` and cannot be set in this function. Parameters ---------- x : array-like, shape=(n_samples,) The 1d array on which to compute the auto correlation. Examples -------- >>> as_series([1, 2, 3]) 0 1 1 2 2 3 dtype: int64 >>> as_series(as_series((1, 2, 3))) 0 1 1 2 2 3 dtype: int64 >>> import pandas as pd >>> as_series(pd.Series([4, 5, 6], index=['a', 'b', 'c'])) a 4 b 5 c 6 dtype: int64 Returns ------- s : pd.Series A pandas Series object.
[ "Cast", "as", "pandas", "Series", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/array.py#L23-L63
226,738
tgsmith61591/pmdarima
pmdarima/utils/array.py
c
def c(*args): r"""Imitates the ``c`` function from R. Since this whole library is aimed at re-creating in Python what R has already done so well, the ``c`` function was created to wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R, this works with scalars, iterables, and any mix therein. Note that using the ``c`` function on multi-nested lists or iterables will fail! Examples -------- Using ``c`` with varargs will yield a single array: >>> c(1, 2, 3, 4) array([1, 2, 3, 4]) Using ``c`` with nested lists and scalars will also yield a single array: >>> c([1, 2], 4, c(5, 4)) array([1, 2, 4, 5, 4]) However, using ``c`` with multi-level lists will fail! >>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP ValueError: all the input arrays must have same number of dimensions References ---------- .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html """ # R returns NULL for this if not args: return None # just an array of len 1 if len(args) == 1: element = args[0] # if it's iterable, make it an array if is_iterable(element): return np.asarray(element) # otherwise it's not iterable, put it in an array return np.asarray([element]) # np.concat all. This can be slow, as noted by numerous threads on # numpy concat efficiency, however an alternative using recursive # yields was tested and performed far worse: # # >>> def timeit(func, ntimes, *args): # ... times = [] # ... for i in range(ntimes): # ... start = time.time() # ... func(*args) # ... times.append(time.time() - start) # ... arr = np.asarray(times) # ... print("%s (%i times) - Mean: %.5f sec, " # ... "Min: %.5f sec, Max: %.5f" % (func.__name__, ntimes, # ... arr.mean(), arr.min(), # ... 
arr.max())) # >>> y = [np.arange(10000), range(500), (1000,), 100, np.arange(50000)] # >>> timeit(c1, 100, *y) # c1 (100 times) - Mean: 0.00009 sec, Min: 0.00006 sec, Max: 0.00065 # >>> timeit(c2, 100, *y) # c2 (100 times) - Mean: 0.08708 sec, Min: 0.08273 sec, Max: 0.10115 # # So we stick with c1, which is this variant. return np.concatenate([a if is_iterable(a) else [a] for a in args])
python
def c(*args): r"""Imitates the ``c`` function from R. Since this whole library is aimed at re-creating in Python what R has already done so well, the ``c`` function was created to wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R, this works with scalars, iterables, and any mix therein. Note that using the ``c`` function on multi-nested lists or iterables will fail! Examples -------- Using ``c`` with varargs will yield a single array: >>> c(1, 2, 3, 4) array([1, 2, 3, 4]) Using ``c`` with nested lists and scalars will also yield a single array: >>> c([1, 2], 4, c(5, 4)) array([1, 2, 4, 5, 4]) However, using ``c`` with multi-level lists will fail! >>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP ValueError: all the input arrays must have same number of dimensions References ---------- .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html """ # R returns NULL for this if not args: return None # just an array of len 1 if len(args) == 1: element = args[0] # if it's iterable, make it an array if is_iterable(element): return np.asarray(element) # otherwise it's not iterable, put it in an array return np.asarray([element]) # np.concat all. This can be slow, as noted by numerous threads on # numpy concat efficiency, however an alternative using recursive # yields was tested and performed far worse: # # >>> def timeit(func, ntimes, *args): # ... times = [] # ... for i in range(ntimes): # ... start = time.time() # ... func(*args) # ... times.append(time.time() - start) # ... arr = np.asarray(times) # ... print("%s (%i times) - Mean: %.5f sec, " # ... "Min: %.5f sec, Max: %.5f" % (func.__name__, ntimes, # ... arr.mean(), arr.min(), # ... 
arr.max())) # >>> y = [np.arange(10000), range(500), (1000,), 100, np.arange(50000)] # >>> timeit(c1, 100, *y) # c1 (100 times) - Mean: 0.00009 sec, Min: 0.00006 sec, Max: 0.00065 # >>> timeit(c2, 100, *y) # c2 (100 times) - Mean: 0.08708 sec, Min: 0.08273 sec, Max: 0.10115 # # So we stick with c1, which is this variant. return np.concatenate([a if is_iterable(a) else [a] for a in args])
[ "def", "c", "(", "*", "args", ")", ":", "# R returns NULL for this", "if", "not", "args", ":", "return", "None", "# just an array of len 1", "if", "len", "(", "args", ")", "==", "1", ":", "element", "=", "args", "[", "0", "]", "# if it's iterable, make it an...
r"""Imitates the ``c`` function from R. Since this whole library is aimed at re-creating in Python what R has already done so well, the ``c`` function was created to wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R, this works with scalars, iterables, and any mix therein. Note that using the ``c`` function on multi-nested lists or iterables will fail! Examples -------- Using ``c`` with varargs will yield a single array: >>> c(1, 2, 3, 4) array([1, 2, 3, 4]) Using ``c`` with nested lists and scalars will also yield a single array: >>> c([1, 2], 4, c(5, 4)) array([1, 2, 4, 5, 4]) However, using ``c`` with multi-level lists will fail! >>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP ValueError: all the input arrays must have same number of dimensions References ---------- .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html
[ "r", "Imitates", "the", "c", "function", "from", "R", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/array.py#L66-L135
226,739
tgsmith61591/pmdarima
pmdarima/utils/array.py
diff
def diff(x, lag=1, differences=1): """Difference an array. A python implementation of the R ``diff`` function [1]. This computes lag differences from an array given a ``lag`` and ``differencing`` term. If ``x`` is a vector of length :math:`n`, ``lag=1`` and ``differences=1``, then the computed result is equal to the successive differences ``x[lag:n] - x[:n-lag]``. Examples -------- Where ``lag=1`` and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 1, 1) array([ -6., -2., 7., 25.], dtype=float32) Where ``lag=1`` and ``differences=2``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 1, 2) array([ 4., 9., 18.], dtype=float32) Where ``lag=3`` and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 3, 1) array([ -1., 30.], dtype=float32) Where ``lag=6`` (larger than the array is) and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 6, 1) array([], dtype=float32) For a 2d array with ``lag=1`` and ``differences=1``: >>> import numpy as np >>> >>> x = np.arange(1, 10).reshape((3, 3)).T >>> diff(x, 1, 1) array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array to difference. lag : int, optional (default=1) An integer > 0 indicating which lag to use. differences : int, optional (default=1) An integer > 0 indicating the order of the difference. Returns ------- res : np.ndarray, shape=(n_samples, [n_features]) The result of the differenced arrays. References ---------- .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/diff.html """ if any(v < 1 for v in (lag, differences)): raise ValueError('lag and differences must be positive (> 0) integers') x = check_array(x, ensure_2d=False, dtype=np.float32) # type: np.ndarray fun = _diff_vector if x.ndim == 1 else _diff_matrix res = x # "recurse" over range of differences for i in range(differences): res = fun(res, lag) # if it ever comes back empty, just return it as is if not res.shape[0]: return res return res
python
def diff(x, lag=1, differences=1): if any(v < 1 for v in (lag, differences)): raise ValueError('lag and differences must be positive (> 0) integers') x = check_array(x, ensure_2d=False, dtype=np.float32) # type: np.ndarray fun = _diff_vector if x.ndim == 1 else _diff_matrix res = x # "recurse" over range of differences for i in range(differences): res = fun(res, lag) # if it ever comes back empty, just return it as is if not res.shape[0]: return res return res
[ "def", "diff", "(", "x", ",", "lag", "=", "1", ",", "differences", "=", "1", ")", ":", "if", "any", "(", "v", "<", "1", "for", "v", "in", "(", "lag", ",", "differences", ")", ")", ":", "raise", "ValueError", "(", "'lag and differences must be positiv...
Difference an array. A python implementation of the R ``diff`` function [1]. This computes lag differences from an array given a ``lag`` and ``differencing`` term. If ``x`` is a vector of length :math:`n`, ``lag=1`` and ``differences=1``, then the computed result is equal to the successive differences ``x[lag:n] - x[:n-lag]``. Examples -------- Where ``lag=1`` and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 1, 1) array([ -6., -2., 7., 25.], dtype=float32) Where ``lag=1`` and ``differences=2``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 1, 2) array([ 4., 9., 18.], dtype=float32) Where ``lag=3`` and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 3, 1) array([ -1., 30.], dtype=float32) Where ``lag=6`` (larger than the array is) and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 6, 1) array([], dtype=float32) For a 2d array with ``lag=1`` and ``differences=1``: >>> import numpy as np >>> >>> x = np.arange(1, 10).reshape((3, 3)).T >>> diff(x, 1, 1) array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array to difference. lag : int, optional (default=1) An integer > 0 indicating which lag to use. differences : int, optional (default=1) An integer > 0 indicating the order of the difference. Returns ------- res : np.ndarray, shape=(n_samples, [n_features]) The result of the differenced arrays. References ---------- .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/diff.html
[ "Difference", "an", "array", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/array.py#L152-L231
226,740
tgsmith61591/pmdarima
pmdarima/arima/arima.py
_aicc
def _aicc(model_results, nobs): """Compute the corrected Akaike Information Criterion""" aic = model_results.aic df_model = model_results.df_model + 1 # add one for constant term return aic + 2. * df_model * (nobs / (nobs - df_model - 1.) - 1.)
python
def _aicc(model_results, nobs): aic = model_results.aic df_model = model_results.df_model + 1 # add one for constant term return aic + 2. * df_model * (nobs / (nobs - df_model - 1.) - 1.)
[ "def", "_aicc", "(", "model_results", ",", "nobs", ")", ":", "aic", "=", "model_results", ".", "aic", "df_model", "=", "model_results", ".", "df_model", "+", "1", "# add one for constant term", "return", "aic", "+", "2.", "*", "df_model", "*", "(", "nobs", ...
Compute the corrected Akaike Information Criterion
[ "Compute", "the", "corrected", "Akaike", "Information", "Criterion" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L45-L49
226,741
tgsmith61591/pmdarima
pmdarima/arima/arima.py
_append_to_endog
def _append_to_endog(endog, new_y): """Append to the endogenous array Parameters ---------- endog : np.ndarray, shape=(n_samples, [1]) The existing endogenous array new_y : np.ndarray, shape=(n_samples) The new endogenous array to append """ return np.concatenate((endog, new_y)) if \ endog.ndim == 1 else \ np.concatenate((endog.ravel(), new_y))[:, np.newaxis]
python
def _append_to_endog(endog, new_y): return np.concatenate((endog, new_y)) if \ endog.ndim == 1 else \ np.concatenate((endog.ravel(), new_y))[:, np.newaxis]
[ "def", "_append_to_endog", "(", "endog", ",", "new_y", ")", ":", "return", "np", ".", "concatenate", "(", "(", "endog", ",", "new_y", ")", ")", "if", "endog", ".", "ndim", "==", "1", "else", "np", ".", "concatenate", "(", "(", "endog", ".", "ravel", ...
Append to the endogenous array Parameters ---------- endog : np.ndarray, shape=(n_samples, [1]) The existing endogenous array new_y : np.ndarray, shape=(n_samples) The new endogenous array to append
[ "Append", "to", "the", "endogenous", "array" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L52-L65
226,742
tgsmith61591/pmdarima
pmdarima/arima/arima.py
ARIMA.fit
def fit(self, y, exogenous=None, **fit_args): """Fit an ARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. **fit_args : dict or kwargs Any keyword arguments to pass to the statsmodels ARIMA fit. """ y = c1d(check_array(y, ensure_2d=False, force_all_finite=False, copy=True, dtype=DTYPE)) # type: np.ndarray n_samples = y.shape[0] # if exog was included, check the array... if exogenous is not None: exogenous = check_array(exogenous, ensure_2d=True, force_all_finite=False, copy=False, dtype=DTYPE) # determine the CV args, if any cv = self.out_of_sample_size scoring = get_callable(self.scoring, VALID_SCORING) # don't allow negative, don't allow > n_samples cv = max(cv, 0) # if cv is too big, raise if cv >= n_samples: raise ValueError("out-of-sample size must be less than number " "of samples!") # If we want to get a score on the out-of-sample, we need to trim # down the size of our y vec for fitting. 
Addressed due to Issue #28 cv_samples = None cv_exog = None if cv: cv_samples = y[-cv:] y = y[:-cv] # This also means we have to address the exogenous matrix if exogenous is not None: cv_exog = exogenous[-cv:, :] exogenous = exogenous[:-cv, :] # Internal call self._fit(y, exogenous, **fit_args) # now make a forecast if we're validating to compute the # out-of-sample score if cv_samples is not None: # get the predictions (use self.predict, which calls forecast # from statsmodels internally) pred = self.predict(n_periods=cv, exogenous=cv_exog) self.oob_ = scoring(cv_samples, pred, **self.scoring_args) self.oob_preds_ = pred # If we compute out of sample scores, we have to now update the # observed time points so future forecasts originate from the end # of our y vec self.update(cv_samples, cv_exog, **fit_args) else: self.oob_ = np.nan self.oob_preds_ = None return self
python
def fit(self, y, exogenous=None, **fit_args): y = c1d(check_array(y, ensure_2d=False, force_all_finite=False, copy=True, dtype=DTYPE)) # type: np.ndarray n_samples = y.shape[0] # if exog was included, check the array... if exogenous is not None: exogenous = check_array(exogenous, ensure_2d=True, force_all_finite=False, copy=False, dtype=DTYPE) # determine the CV args, if any cv = self.out_of_sample_size scoring = get_callable(self.scoring, VALID_SCORING) # don't allow negative, don't allow > n_samples cv = max(cv, 0) # if cv is too big, raise if cv >= n_samples: raise ValueError("out-of-sample size must be less than number " "of samples!") # If we want to get a score on the out-of-sample, we need to trim # down the size of our y vec for fitting. Addressed due to Issue #28 cv_samples = None cv_exog = None if cv: cv_samples = y[-cv:] y = y[:-cv] # This also means we have to address the exogenous matrix if exogenous is not None: cv_exog = exogenous[-cv:, :] exogenous = exogenous[:-cv, :] # Internal call self._fit(y, exogenous, **fit_args) # now make a forecast if we're validating to compute the # out-of-sample score if cv_samples is not None: # get the predictions (use self.predict, which calls forecast # from statsmodels internally) pred = self.predict(n_periods=cv, exogenous=cv_exog) self.oob_ = scoring(cv_samples, pred, **self.scoring_args) self.oob_preds_ = pred # If we compute out of sample scores, we have to now update the # observed time points so future forecasts originate from the end # of our y vec self.update(cv_samples, cv_exog, **fit_args) else: self.oob_ = np.nan self.oob_preds_ = None return self
[ "def", "fit", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "*", "*", "fit_args", ")", ":", "y", "=", "c1d", "(", "check_array", "(", "y", ",", "ensure_2d", "=", "False", ",", "force_all_finite", "=", "False", ",", "copy", "=", "True",...
Fit an ARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. **fit_args : dict or kwargs Any keyword arguments to pass to the statsmodels ARIMA fit.
[ "Fit", "an", "ARIMA", "to", "a", "vector", "y", "of", "observations", "with", "an", "optional", "matrix", "of", "exogenous", "variables", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L380-L458
226,743
tgsmith61591/pmdarima
pmdarima/arima/arima.py
ARIMA.predict_in_sample
def predict_in_sample(self, exogenous=None, start=None, end=None, dynamic=False): """Generate in-sample predictions from the fit ARIMA model. This can be useful when wanting to visualize the fit, and qualitatively inspect the efficacy of the model, or when wanting to compute the residuals of the model. Parameters ---------- exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. start : int, optional (default=None) Zero-indexed observation number at which to start forecasting, ie., the first forecast is start. end : int, optional (default=None) Zero-indexed observation number at which to end forecasting, ie., the first forecast is start. dynamic : bool, optional The `dynamic` keyword affects in-sample prediction. If dynamic is False, then the in-sample lagged values are used for prediction. If `dynamic` is True, then in-sample forecasts are used in place of lagged dependent variables. The first forecasted value is `start`. Returns ------- predict : array The predicted values. """ check_is_fitted(self, 'arima_res_') # if we fit with exog, make sure one was passed: exogenous = self._check_exog(exogenous) # type: np.ndarray return self.arima_res_.predict(exog=exogenous, start=start, end=end, dynamic=dynamic)
python
def predict_in_sample(self, exogenous=None, start=None, end=None, dynamic=False): check_is_fitted(self, 'arima_res_') # if we fit with exog, make sure one was passed: exogenous = self._check_exog(exogenous) # type: np.ndarray return self.arima_res_.predict(exog=exogenous, start=start, end=end, dynamic=dynamic)
[ "def", "predict_in_sample", "(", "self", ",", "exogenous", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "dynamic", "=", "False", ")", ":", "check_is_fitted", "(", "self", ",", "'arima_res_'", ")", "# if we fit with exog, make sure one ...
Generate in-sample predictions from the fit ARIMA model. This can be useful when wanting to visualize the fit, and qualitatively inspect the efficacy of the model, or when wanting to compute the residuals of the model. Parameters ---------- exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. start : int, optional (default=None) Zero-indexed observation number at which to start forecasting, ie., the first forecast is start. end : int, optional (default=None) Zero-indexed observation number at which to end forecasting, ie., the first forecast is start. dynamic : bool, optional The `dynamic` keyword affects in-sample prediction. If dynamic is False, then the in-sample lagged values are used for prediction. If `dynamic` is True, then in-sample forecasts are used in place of lagged dependent variables. The first forecasted value is `start`. Returns ------- predict : array The predicted values.
[ "Generate", "in", "-", "sample", "predictions", "from", "the", "fit", "ARIMA", "model", ".", "This", "can", "be", "useful", "when", "wanting", "to", "visualize", "the", "fit", "and", "qualitatively", "inspect", "the", "efficacy", "of", "the", "model", "or", ...
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L472-L513
226,744
tgsmith61591/pmdarima
pmdarima/arima/arima.py
ARIMA.predict
def predict(self, n_periods=10, exogenous=None, return_conf_int=False, alpha=0.05): """Forecast future values Generate predictions (forecasts) ``n_periods`` in the future. Note that if ``exogenous`` variables were used in the model fit, they will be expected for the predict procedure and will fail otherwise. Parameters ---------- n_periods : int, optional (default=10) The number of periods in the future to forecast. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. return_conf_int : bool, optional (default=False) Whether to get the confidence intervals of the forecasts. alpha : float, optional (default=0.05) The confidence intervals for the forecasts are (1 - alpha) % Returns ------- forecasts : array-like, shape=(n_periods,) The array of fore-casted values. conf_int : array-like, shape=(n_periods, 2), optional The confidence intervals for the forecasts. Only returned if ``return_conf_int`` is True. """ check_is_fitted(self, 'arima_res_') if not isinstance(n_periods, (int, long)): raise TypeError("n_periods must be an int or a long") # if we fit with exog, make sure one was passed: exogenous = self._check_exog(exogenous) # type: np.ndarray if exogenous is not None and exogenous.shape[0] != n_periods: raise ValueError('Exogenous array dims (n_rows) != n_periods') # ARIMA/ARMA predict differently... 
if not self._is_seasonal(): # use the results wrapper to predict so it injects its own params # (also if I was 0, ARMA will not have a forecast method natively) f, _, conf_int = self.arima_res_.forecast( steps=n_periods, exog=exogenous, alpha=alpha) else: # SARIMAX # Unfortunately, SARIMAX does not really provide a nice way to get # the confidence intervals out of the box, so we have to perform # the get_prediction code here and unpack the confidence intervals # manually. # f = self.arima_res_.forecast(steps=n_periods, exog=exogenous) arima = self.arima_res_ end = arima.nobs + n_periods - 1 results = arima.get_prediction(start=arima.nobs, end=end, exog=exogenous) f = results.predicted_mean conf_int = results.conf_int(alpha=alpha) if return_conf_int: # The confidence intervals may be a Pandas frame if it comes from # SARIMAX & we want Numpy. We will to duck type it so we don't add # new explicit requirements for the package return f, check_array(conf_int, force_all_finite=False) return f
python
def predict(self, n_periods=10, exogenous=None, return_conf_int=False, alpha=0.05): check_is_fitted(self, 'arima_res_') if not isinstance(n_periods, (int, long)): raise TypeError("n_periods must be an int or a long") # if we fit with exog, make sure one was passed: exogenous = self._check_exog(exogenous) # type: np.ndarray if exogenous is not None and exogenous.shape[0] != n_periods: raise ValueError('Exogenous array dims (n_rows) != n_periods') # ARIMA/ARMA predict differently... if not self._is_seasonal(): # use the results wrapper to predict so it injects its own params # (also if I was 0, ARMA will not have a forecast method natively) f, _, conf_int = self.arima_res_.forecast( steps=n_periods, exog=exogenous, alpha=alpha) else: # SARIMAX # Unfortunately, SARIMAX does not really provide a nice way to get # the confidence intervals out of the box, so we have to perform # the get_prediction code here and unpack the confidence intervals # manually. # f = self.arima_res_.forecast(steps=n_periods, exog=exogenous) arima = self.arima_res_ end = arima.nobs + n_periods - 1 results = arima.get_prediction(start=arima.nobs, end=end, exog=exogenous) f = results.predicted_mean conf_int = results.conf_int(alpha=alpha) if return_conf_int: # The confidence intervals may be a Pandas frame if it comes from # SARIMAX & we want Numpy. We will to duck type it so we don't add # new explicit requirements for the package return f, check_array(conf_int, force_all_finite=False) return f
[ "def", "predict", "(", "self", ",", "n_periods", "=", "10", ",", "exogenous", "=", "None", ",", "return_conf_int", "=", "False", ",", "alpha", "=", "0.05", ")", ":", "check_is_fitted", "(", "self", ",", "'arima_res_'", ")", "if", "not", "isinstance", "("...
Forecast future values Generate predictions (forecasts) ``n_periods`` in the future. Note that if ``exogenous`` variables were used in the model fit, they will be expected for the predict procedure and will fail otherwise. Parameters ---------- n_periods : int, optional (default=10) The number of periods in the future to forecast. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. return_conf_int : bool, optional (default=False) Whether to get the confidence intervals of the forecasts. alpha : float, optional (default=0.05) The confidence intervals for the forecasts are (1 - alpha) % Returns ------- forecasts : array-like, shape=(n_periods,) The array of fore-casted values. conf_int : array-like, shape=(n_periods, 2), optional The confidence intervals for the forecasts. Only returned if ``return_conf_int`` is True.
[ "Forecast", "future", "values" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L515-L584
226,745
tgsmith61591/pmdarima
pmdarima/arima/arima.py
ARIMA.conf_int
def conf_int(self, alpha=0.05, **kwargs): r"""Returns the confidence interval of the fitted parameters. Returns ------- alpha : float, optional (default=0.05) The significance level for the confidence interval. ie., the default alpha = .05 returns a 95% confidence interval. **kwargs : keyword args or dict Keyword arguments to pass to the confidence interval function. Could include 'cols' or 'method' """ return self.arima_res_.conf_int(alpha=alpha, **kwargs)
python
def conf_int(self, alpha=0.05, **kwargs): r"""Returns the confidence interval of the fitted parameters. Returns ------- alpha : float, optional (default=0.05) The significance level for the confidence interval. ie., the default alpha = .05 returns a 95% confidence interval. **kwargs : keyword args or dict Keyword arguments to pass to the confidence interval function. Could include 'cols' or 'method' """ return self.arima_res_.conf_int(alpha=alpha, **kwargs)
[ "def", "conf_int", "(", "self", ",", "alpha", "=", "0.05", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "arima_res_", ".", "conf_int", "(", "alpha", "=", "alpha", ",", "*", "*", "kwargs", ")" ]
r"""Returns the confidence interval of the fitted parameters. Returns ------- alpha : float, optional (default=0.05) The significance level for the confidence interval. ie., the default alpha = .05 returns a 95% confidence interval. **kwargs : keyword args or dict Keyword arguments to pass to the confidence interval function. Could include 'cols' or 'method'
[ "r", "Returns", "the", "confidence", "interval", "of", "the", "fitted", "parameters", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L886-L899
226,746
tgsmith61591/pmdarima
pmdarima/arima/arima.py
ARIMA.to_dict
def to_dict(self): """Get the ARIMA model as a dictionary Return the dictionary representation of the ARIMA model Returns ------- res : dictionary The ARIMA model as a dictionary. """ return { 'pvalues': self.pvalues(), 'resid': self.resid(), 'order': self.order, 'seasonal_order': self.seasonal_order, 'oob': self.oob(), 'aic': self.aic(), 'aicc': self.aicc(), 'bic': self.bic(), 'bse': self.bse(), 'params': self.params() }
python
def to_dict(self): return { 'pvalues': self.pvalues(), 'resid': self.resid(), 'order': self.order, 'seasonal_order': self.seasonal_order, 'oob': self.oob(), 'aic': self.aic(), 'aicc': self.aicc(), 'bic': self.bic(), 'bse': self.bse(), 'params': self.params() }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'pvalues'", ":", "self", ".", "pvalues", "(", ")", ",", "'resid'", ":", "self", ".", "resid", "(", ")", ",", "'order'", ":", "self", ".", "order", ",", "'seasonal_order'", ":", "self", ".", "...
Get the ARIMA model as a dictionary Return the dictionary representation of the ARIMA model Returns ------- res : dictionary The ARIMA model as a dictionary.
[ "Get", "the", "ARIMA", "model", "as", "a", "dictionary" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L1033-L1054
226,747
tgsmith61591/pmdarima
pmdarima/arima/arima.py
ARIMA.plot_diagnostics
def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None): """Plot an ARIMA's diagnostics. Diagnostic plots for standardized residuals of one endogenous variable Parameters ---------- variable : integer, optional Index of the endogenous variable for which the diagnostic plots should be created. Default is 0. lags : integer, optional Number of lags to include in the correlogram. Default is 10. fig : Matplotlib Figure instance, optional If given, subplots are created in this figure instead of in a new figure. Note that the 2x2 grid will be created in the provided figure using `fig.add_subplot()`. figsize : tuple, optional If a figure is created, this argument allows specifying a size. The tuple is (width, height). Notes ----- Produces a 2x2 plot grid with the following plots (ordered clockwise from top left): 1. Standardized residuals over time 2. Histogram plus estimated density of standardized residulas, along with a Normal(0,1) density plotted for reference. 3. Normal Q-Q plot, with Normal reference line. 4. Correlogram See Also -------- statsmodels.graphics.gofplots.qqplot pmdarima.utils.visualization.plot_acf References ---------- .. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501 """ # implicitly checks whether installed, and does our backend magic: _get_plt() # We originally delegated down to SARIMAX model wrapper, but # statsmodels makes it difficult to trust their API, so we just re- # implemented a common method for all results wrappers. from statsmodels.graphics.utils import create_mpl_fig fig = create_mpl_fig(fig, figsize) res_wpr = self.arima_res_ data = res_wpr.data # Eliminate residuals associated with burned or diffuse likelihoods. # The statsmodels code for the Kalman Filter takes the loglik_burn # as a parameter: # loglikelihood_burn : int, optional # The number of initial periods during which the loglikelihood is # not recorded. Default is 0. 
# If the class has it, it's a SARIMAX and we'll use it. Otherwise we # will just access the residuals as we normally would... if hasattr(res_wpr, 'loglikelihood_burn'): # This is introduced in the bleeding edge version, but is not # backwards compatible with 0.9.0 and less: d = res_wpr.loglikelihood_burn if hasattr(res_wpr, 'nobs_diffuse'): d = np.maximum(d, res_wpr.nobs_diffuse) resid = res_wpr.filter_results\ .standardized_forecasts_error[variable, d:] else: # This gets the residuals, but they need to be standardized d = 0 r = res_wpr.resid resid = (r - np.nanmean(r)) / np.nanstd(r) # Top-left: residuals vs time ax = fig.add_subplot(221) if hasattr(data, 'dates') and data.dates is not None: x = data.dates[d:]._mpl_repr() else: x = np.arange(len(resid)) ax.plot(x, resid) ax.hlines(0, x[0], x[-1], alpha=0.5) ax.set_xlim(x[0], x[-1]) ax.set_title('Standardized residual') # Top-right: histogram, Gaussian kernel density, Normal density # Can only do histogram and Gaussian kernel density on the non-null # elements resid_nonmissing = resid[~(np.isnan(resid))] ax = fig.add_subplot(222) # temporarily disable Deprecation warning, normed -> density # hist needs to use `density` in future when minimum matplotlib has it with warnings.catch_warnings(record=True): ax.hist(resid_nonmissing, normed=True, label='Hist') kde = gaussian_kde(resid_nonmissing) xlim = (-1.96 * 2, 1.96 * 2) x = np.linspace(xlim[0], xlim[1]) ax.plot(x, kde(x), label='KDE') ax.plot(x, norm.pdf(x), label='N(0,1)') ax.set_xlim(xlim) ax.legend() ax.set_title('Histogram plus estimated density') # Bottom-left: QQ plot ax = fig.add_subplot(223) from statsmodels.graphics.gofplots import qqplot qqplot(resid_nonmissing, line='s', ax=ax) ax.set_title('Normal Q-Q') # Bottom-right: Correlogram ax = fig.add_subplot(224) from statsmodels.graphics.tsaplots import plot_acf plot_acf(resid, ax=ax, lags=lags) ax.set_title('Correlogram') ax.set_ylim(-1, 1) return fig
python
def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None): # implicitly checks whether installed, and does our backend magic: _get_plt() # We originally delegated down to SARIMAX model wrapper, but # statsmodels makes it difficult to trust their API, so we just re- # implemented a common method for all results wrappers. from statsmodels.graphics.utils import create_mpl_fig fig = create_mpl_fig(fig, figsize) res_wpr = self.arima_res_ data = res_wpr.data # Eliminate residuals associated with burned or diffuse likelihoods. # The statsmodels code for the Kalman Filter takes the loglik_burn # as a parameter: # loglikelihood_burn : int, optional # The number of initial periods during which the loglikelihood is # not recorded. Default is 0. # If the class has it, it's a SARIMAX and we'll use it. Otherwise we # will just access the residuals as we normally would... if hasattr(res_wpr, 'loglikelihood_burn'): # This is introduced in the bleeding edge version, but is not # backwards compatible with 0.9.0 and less: d = res_wpr.loglikelihood_burn if hasattr(res_wpr, 'nobs_diffuse'): d = np.maximum(d, res_wpr.nobs_diffuse) resid = res_wpr.filter_results\ .standardized_forecasts_error[variable, d:] else: # This gets the residuals, but they need to be standardized d = 0 r = res_wpr.resid resid = (r - np.nanmean(r)) / np.nanstd(r) # Top-left: residuals vs time ax = fig.add_subplot(221) if hasattr(data, 'dates') and data.dates is not None: x = data.dates[d:]._mpl_repr() else: x = np.arange(len(resid)) ax.plot(x, resid) ax.hlines(0, x[0], x[-1], alpha=0.5) ax.set_xlim(x[0], x[-1]) ax.set_title('Standardized residual') # Top-right: histogram, Gaussian kernel density, Normal density # Can only do histogram and Gaussian kernel density on the non-null # elements resid_nonmissing = resid[~(np.isnan(resid))] ax = fig.add_subplot(222) # temporarily disable Deprecation warning, normed -> density # hist needs to use `density` in future when minimum matplotlib has it with 
warnings.catch_warnings(record=True): ax.hist(resid_nonmissing, normed=True, label='Hist') kde = gaussian_kde(resid_nonmissing) xlim = (-1.96 * 2, 1.96 * 2) x = np.linspace(xlim[0], xlim[1]) ax.plot(x, kde(x), label='KDE') ax.plot(x, norm.pdf(x), label='N(0,1)') ax.set_xlim(xlim) ax.legend() ax.set_title('Histogram plus estimated density') # Bottom-left: QQ plot ax = fig.add_subplot(223) from statsmodels.graphics.gofplots import qqplot qqplot(resid_nonmissing, line='s', ax=ax) ax.set_title('Normal Q-Q') # Bottom-right: Correlogram ax = fig.add_subplot(224) from statsmodels.graphics.tsaplots import plot_acf plot_acf(resid, ax=ax, lags=lags) ax.set_title('Correlogram') ax.set_ylim(-1, 1) return fig
[ "def", "plot_diagnostics", "(", "self", ",", "variable", "=", "0", ",", "lags", "=", "10", ",", "fig", "=", "None", ",", "figsize", "=", "None", ")", ":", "# implicitly checks whether installed, and does our backend magic:", "_get_plt", "(", ")", "# We originally ...
Plot an ARIMA's diagnostics. Diagnostic plots for standardized residuals of one endogenous variable Parameters ---------- variable : integer, optional Index of the endogenous variable for which the diagnostic plots should be created. Default is 0. lags : integer, optional Number of lags to include in the correlogram. Default is 10. fig : Matplotlib Figure instance, optional If given, subplots are created in this figure instead of in a new figure. Note that the 2x2 grid will be created in the provided figure using `fig.add_subplot()`. figsize : tuple, optional If a figure is created, this argument allows specifying a size. The tuple is (width, height). Notes ----- Produces a 2x2 plot grid with the following plots (ordered clockwise from top left): 1. Standardized residuals over time 2. Histogram plus estimated density of standardized residulas, along with a Normal(0,1) density plotted for reference. 3. Normal Q-Q plot, with Normal reference line. 4. Correlogram See Also -------- statsmodels.graphics.gofplots.qqplot pmdarima.utils.visualization.plot_acf References ---------- .. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501
[ "Plot", "an", "ARIMA", "s", "diagnostics", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L1057-L1181
226,748
tgsmith61591/pmdarima
pmdarima/preprocessing/endog/boxcox.py
BoxCoxEndogTransformer.transform
def transform(self, y, exogenous=None, **_): """Transform the new array Apply the Box-Cox transformation to the array after learning the lambda parameter. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. Returns ------- y_transform : array-like or None The Box-Cox transformed y array exogenous : array-like or None The exog array """ check_is_fitted(self, "lam1_") lam1 = self.lam1_ lam2 = self.lam2_ y, exog = self._check_y_exog(y, exogenous) y += lam2 neg_mask = y <= 0. if neg_mask.any(): action = self.neg_action msg = "Negative or zero values present in y" if action == "raise": raise ValueError(msg) elif action == "warn": warnings.warn(msg, UserWarning) y[neg_mask] = self.floor if lam1 == 0: return np.log(y), exog return (y ** lam1 - 1) / lam1, exog
python
def transform(self, y, exogenous=None, **_): check_is_fitted(self, "lam1_") lam1 = self.lam1_ lam2 = self.lam2_ y, exog = self._check_y_exog(y, exogenous) y += lam2 neg_mask = y <= 0. if neg_mask.any(): action = self.neg_action msg = "Negative or zero values present in y" if action == "raise": raise ValueError(msg) elif action == "warn": warnings.warn(msg, UserWarning) y[neg_mask] = self.floor if lam1 == 0: return np.log(y), exog return (y ** lam1 - 1) / lam1, exog
[ "def", "transform", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "*", "*", "_", ")", ":", "check_is_fitted", "(", "self", ",", "\"lam1_\"", ")", "lam1", "=", "self", ".", "lam1_", "lam2", "=", "self", ".", "lam2_", "y", ",", "exog", ...
Transform the new array Apply the Box-Cox transformation to the array after learning the lambda parameter. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. Returns ------- y_transform : array-like or None The Box-Cox transformed y array exogenous : array-like or None The exog array
[ "Transform", "the", "new", "array" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/preprocessing/endog/boxcox.py#L82-L125
226,749
tgsmith61591/pmdarima
pmdarima/preprocessing/endog/boxcox.py
BoxCoxEndogTransformer.inverse_transform
def inverse_transform(self, y, exogenous=None): """Inverse transform a transformed array Inverse the Box-Cox transformation on the transformed array. Note that if truncation happened in the ``transform`` method, invertibility will not be preserved, and the transformed array may not be perfectly inverse-transformed. Parameters ---------- y : array-like or None, shape=(n_samples,) The transformed endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. Returns ------- y : array-like or None The inverse-transformed y array exogenous : array-like or None The inverse-transformed exogenous array """ check_is_fitted(self, "lam1_") lam1 = self.lam1_ lam2 = self.lam2_ y, exog = self._check_y_exog(y, exogenous) if lam1 == 0: return np.exp(y) - lam2, exog numer = y * lam1 # remove denominator numer += 1. # add 1 back to it de_exp = numer ** (1. / lam1) # de-exponentiate return de_exp - lam2, exog
python
def inverse_transform(self, y, exogenous=None): check_is_fitted(self, "lam1_") lam1 = self.lam1_ lam2 = self.lam2_ y, exog = self._check_y_exog(y, exogenous) if lam1 == 0: return np.exp(y) - lam2, exog numer = y * lam1 # remove denominator numer += 1. # add 1 back to it de_exp = numer ** (1. / lam1) # de-exponentiate return de_exp - lam2, exog
[ "def", "inverse_transform", "(", "self", ",", "y", ",", "exogenous", "=", "None", ")", ":", "check_is_fitted", "(", "self", ",", "\"lam1_\"", ")", "lam1", "=", "self", ".", "lam1_", "lam2", "=", "self", ".", "lam2_", "y", ",", "exog", "=", "self", "....
Inverse transform a transformed array Inverse the Box-Cox transformation on the transformed array. Note that if truncation happened in the ``transform`` method, invertibility will not be preserved, and the transformed array may not be perfectly inverse-transformed. Parameters ---------- y : array-like or None, shape=(n_samples,) The transformed endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. Returns ------- y : array-like or None The inverse-transformed y array exogenous : array-like or None The inverse-transformed exogenous array
[ "Inverse", "transform", "a", "transformed", "array" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/preprocessing/endog/boxcox.py#L127-L164
226,750
tgsmith61591/pmdarima
pmdarima/arima/approx.py
_regularize
def _regularize(x, y, ties): """Regularize the values, make them ordered and remove duplicates. If the ``ties`` parameter is explicitly set to 'ordered' then order is already assumed. Otherwise, the removal process will happen. Parameters ---------- x : array-like, shape=(n_samples,) The x vector. y : array-like, shape=(n_samples,) The y vector. ties : str One of {'ordered', 'mean'}, handles the ties. """ x, y = [ column_or_1d(check_array(arr, ensure_2d=False, force_all_finite=False, dtype=DTYPE)) for arr in (x, y) ] nx = x.shape[0] if nx != y.shape[0]: raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0])) # manipulate x if needed. if ties is 'ordered' we assume that x is # already ordered and everything has been handled already... if ties != 'ordered': o = np.argsort(x) # keep ordered with one another x = x[o] y = y[o] # what if any are the same? ux = np.unique(x) if ux.shape[0] < nx: # Do we want to warn for this? # warnings.warn('collapsing to unique "x" values') # vectorize this function to apply to each "cell" in the array def tie_apply(f, u_val): vals = y[x == u_val] # mask y where x == the unique value return f(vals) # replace the duplicates in the y array with the "tie" func func = VALID_TIES.get(ties, _identity) # maybe expensive to vectorize on the fly? Not sure; would need # to do some benchmarking. However, we need to in order to keep y # and x in scope... y = np.vectorize(tie_apply)(func, ux) # does ux need ordering? hmm.. x = ux return x, y
python
def _regularize(x, y, ties): x, y = [ column_or_1d(check_array(arr, ensure_2d=False, force_all_finite=False, dtype=DTYPE)) for arr in (x, y) ] nx = x.shape[0] if nx != y.shape[0]: raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0])) # manipulate x if needed. if ties is 'ordered' we assume that x is # already ordered and everything has been handled already... if ties != 'ordered': o = np.argsort(x) # keep ordered with one another x = x[o] y = y[o] # what if any are the same? ux = np.unique(x) if ux.shape[0] < nx: # Do we want to warn for this? # warnings.warn('collapsing to unique "x" values') # vectorize this function to apply to each "cell" in the array def tie_apply(f, u_val): vals = y[x == u_val] # mask y where x == the unique value return f(vals) # replace the duplicates in the y array with the "tie" func func = VALID_TIES.get(ties, _identity) # maybe expensive to vectorize on the fly? Not sure; would need # to do some benchmarking. However, we need to in order to keep y # and x in scope... y = np.vectorize(tie_apply)(func, ux) # does ux need ordering? hmm.. x = ux return x, y
[ "def", "_regularize", "(", "x", ",", "y", ",", "ties", ")", ":", "x", ",", "y", "=", "[", "column_or_1d", "(", "check_array", "(", "arr", ",", "ensure_2d", "=", "False", ",", "force_all_finite", "=", "False", ",", "dtype", "=", "DTYPE", ")", ")", "...
Regularize the values, make them ordered and remove duplicates. If the ``ties`` parameter is explicitly set to 'ordered' then order is already assumed. Otherwise, the removal process will happen. Parameters ---------- x : array-like, shape=(n_samples,) The x vector. y : array-like, shape=(n_samples,) The y vector. ties : str One of {'ordered', 'mean'}, handles the ties.
[ "Regularize", "the", "values", "make", "them", "ordered", "and", "remove", "duplicates", ".", "If", "the", "ties", "parameter", "is", "explicitly", "set", "to", "ordered", "then", "order", "is", "already", "assumed", ".", "Otherwise", "the", "removal", "proces...
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/approx.py#L42-L100
226,751
tgsmith61591/pmdarima
pmdarima/arima/approx.py
approx
def approx(x, y, xout, method='linear', rule=1, f=0, yleft=None, yright=None, ties='mean'): """Linearly interpolate points. Return a list of points which (linearly) interpolate given data points, or a function performing the linear (or constant) interpolation. Parameters ---------- x : array-like, shape=(n_samples,) Numeric vector giving the coordinates of the points to be interpolated. y : array-like, shape=(n_samples,) Numeric vector giving the coordinates of the points to be interpolated. xout : int, float or iterable A scalar or iterable of numeric values specifying where interpolation is to take place. method : str, optional (default='linear') Specifies the interpolation method to be used. Choices are "linear" or "constant". rule : int, optional (default=1) An integer describing how interpolation is to take place outside the interval ``[min(x), max(x)]``. If ``rule`` is 1 then np.nans are returned for such points and if it is 2, the value at the closest data extreme is used. f : int, optional (default=0) For ``method`` = "constant" a number between 0 and 1 inclusive, indicating a compromise between left- and right-continuous step functions. If y0 and y1 are the values to the left and right of the point then the value is y0 if f == 0, y1 if f == 1, and y0*(1-f)+y1*f for intermediate values. In this way the result is right-continuous for f == 0 and left-continuous for f == 1, even for non-finite ``y`` values. yleft : float, optional (default=None) The value to be returned when input ``x`` values are less than ``min(x)``. The default is defined by the value of rule given below. yright : float, optional (default=None) The value to be returned when input ``x`` values are greater than ``max(x)``. The default is defined by the value of rule given below. ties : str, optional (default='mean') Handling of tied ``x`` values. Choices are "mean" or "ordered". 
""" if method not in VALID_APPROX: raise ValueError('method must be one of %r' % VALID_APPROX) # make sure xout is an array xout = c(xout).astype(np.float64) # ensure double # check method method_key = method # not a callable, actually, but serves the purpose.. method = get_callable(method_key, VALID_APPROX) # copy/regularize vectors x, y = _regularize(x, y, ties) nx = x.shape[0] # if len 1? (we've already handled where the size is 0, since we check that # in the _regularize function when we call c1d) if nx == 1: if method_key == 'linear': raise ValueError('need at least two points to ' 'linearly interpolate') # get yleft, yright if yleft is None: yleft = y[0] if rule != 1 else np.nan if yright is None: yright = y[-1] if rule != 1 else np.nan # call the C subroutine yout = C_Approx(x, y, xout, method, f, yleft, yright) # MemoryView return xout, np.asarray(yout)
python
def approx(x, y, xout, method='linear', rule=1, f=0, yleft=None, yright=None, ties='mean'): if method not in VALID_APPROX: raise ValueError('method must be one of %r' % VALID_APPROX) # make sure xout is an array xout = c(xout).astype(np.float64) # ensure double # check method method_key = method # not a callable, actually, but serves the purpose.. method = get_callable(method_key, VALID_APPROX) # copy/regularize vectors x, y = _regularize(x, y, ties) nx = x.shape[0] # if len 1? (we've already handled where the size is 0, since we check that # in the _regularize function when we call c1d) if nx == 1: if method_key == 'linear': raise ValueError('need at least two points to ' 'linearly interpolate') # get yleft, yright if yleft is None: yleft = y[0] if rule != 1 else np.nan if yright is None: yright = y[-1] if rule != 1 else np.nan # call the C subroutine yout = C_Approx(x, y, xout, method, f, yleft, yright) # MemoryView return xout, np.asarray(yout)
[ "def", "approx", "(", "x", ",", "y", ",", "xout", ",", "method", "=", "'linear'", ",", "rule", "=", "1", ",", "f", "=", "0", ",", "yleft", "=", "None", ",", "yright", "=", "None", ",", "ties", "=", "'mean'", ")", ":", "if", "method", "not", "...
Linearly interpolate points. Return a list of points which (linearly) interpolate given data points, or a function performing the linear (or constant) interpolation. Parameters ---------- x : array-like, shape=(n_samples,) Numeric vector giving the coordinates of the points to be interpolated. y : array-like, shape=(n_samples,) Numeric vector giving the coordinates of the points to be interpolated. xout : int, float or iterable A scalar or iterable of numeric values specifying where interpolation is to take place. method : str, optional (default='linear') Specifies the interpolation method to be used. Choices are "linear" or "constant". rule : int, optional (default=1) An integer describing how interpolation is to take place outside the interval ``[min(x), max(x)]``. If ``rule`` is 1 then np.nans are returned for such points and if it is 2, the value at the closest data extreme is used. f : int, optional (default=0) For ``method`` = "constant" a number between 0 and 1 inclusive, indicating a compromise between left- and right-continuous step functions. If y0 and y1 are the values to the left and right of the point then the value is y0 if f == 0, y1 if f == 1, and y0*(1-f)+y1*f for intermediate values. In this way the result is right-continuous for f == 0 and left-continuous for f == 1, even for non-finite ``y`` values. yleft : float, optional (default=None) The value to be returned when input ``x`` values are less than ``min(x)``. The default is defined by the value of rule given below. yright : float, optional (default=None) The value to be returned when input ``x`` values are greater than ``max(x)``. The default is defined by the value of rule given below. ties : str, optional (default='mean') Handling of tied ``x`` values. Choices are "mean" or "ordered".
[ "Linearly", "interpolate", "points", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/approx.py#L103-L185
226,752
tgsmith61591/pmdarima
pmdarima/preprocessing/base.py
BaseTransformer.fit_transform
def fit_transform(self, y, exogenous=None, **transform_kwargs): """Fit and transform the arrays Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. **transform_kwargs : keyword args Keyword arguments required by the transform function. """ self.fit(y, exogenous) return self.transform(y, exogenous, **transform_kwargs)
python
def fit_transform(self, y, exogenous=None, **transform_kwargs): self.fit(y, exogenous) return self.transform(y, exogenous, **transform_kwargs)
[ "def", "fit_transform", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "*", "*", "transform_kwargs", ")", ":", "self", ".", "fit", "(", "y", ",", "exogenous", ")", "return", "self", ".", "transform", "(", "y", ",", "exogenous", ",", "*", ...
Fit and transform the arrays Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. **transform_kwargs : keyword args Keyword arguments required by the transform function.
[ "Fit", "and", "transform", "the", "arrays" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/preprocessing/base.py#L48-L63
226,753
tgsmith61591/pmdarima
pmdarima/pipeline.py
Pipeline.fit
def fit(self, y, exogenous=None, **fit_kwargs): """Fit the pipeline of transformers and the ARIMA model Chain the time-series and exogenous arrays through a series of transformations, fitting each stage along the way, finally fitting an ARIMA or AutoARIMA model. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. **fit_kwargs : keyword args Extra keyword arguments used for each stage's ``fit`` stage. Similar to scikit-learn pipeline keyword args, the keys are compound, comprised of the stage name and the argument name separated by a "__". For instance, if fitting an ARIMA in stage "arima", your kwargs may resemble:: {"arima__maxiter": 10} """ # Shallow copy steps = self.steps_ = self._validate_steps() yt = y Xt = exogenous named_kwargs = self._get_kwargs(**fit_kwargs) for step_idx, name, transformer in self._iter(with_final=False): cloned_transformer = clone(transformer) kwargs = named_kwargs[name] yt, Xt = cloned_transformer.fit_transform(yt, Xt, **kwargs) # Replace the transformer of the step with the fitted # transformer. steps[step_idx] = (name, cloned_transformer) # Now fit the final estimator kwargs = named_kwargs[steps[-1][0]] self._final_estimator.fit(yt, exogenous=Xt, **kwargs) return self
python
def fit(self, y, exogenous=None, **fit_kwargs): # Shallow copy steps = self.steps_ = self._validate_steps() yt = y Xt = exogenous named_kwargs = self._get_kwargs(**fit_kwargs) for step_idx, name, transformer in self._iter(with_final=False): cloned_transformer = clone(transformer) kwargs = named_kwargs[name] yt, Xt = cloned_transformer.fit_transform(yt, Xt, **kwargs) # Replace the transformer of the step with the fitted # transformer. steps[step_idx] = (name, cloned_transformer) # Now fit the final estimator kwargs = named_kwargs[steps[-1][0]] self._final_estimator.fit(yt, exogenous=Xt, **kwargs) return self
[ "def", "fit", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "*", "*", "fit_kwargs", ")", ":", "# Shallow copy", "steps", "=", "self", ".", "steps_", "=", "self", ".", "_validate_steps", "(", ")", "yt", "=", "y", "Xt", "=", "exogenous", ...
Fit the pipeline of transformers and the ARIMA model Chain the time-series and exogenous arrays through a series of transformations, fitting each stage along the way, finally fitting an ARIMA or AutoARIMA model. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. **fit_kwargs : keyword args Extra keyword arguments used for each stage's ``fit`` stage. Similar to scikit-learn pipeline keyword args, the keys are compound, comprised of the stage name and the argument name separated by a "__". For instance, if fitting an ARIMA in stage "arima", your kwargs may resemble:: {"arima__maxiter": 10}
[ "Fit", "the", "pipeline", "of", "transformers", "and", "the", "ARIMA", "model" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/pipeline.py#L143-L194
226,754
tgsmith61591/pmdarima
pmdarima/pipeline.py
Pipeline.update
def update(self, y, exogenous=None, maxiter=None, **kwargs): """Update an ARIMA or auto-ARIMA as well as any necessary transformers Passes the newly observed values through the appropriate endog transformations, and the exogenous array through the exog transformers (updating where necessary) before finally updating the ARIMA model. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series data to add to the endogenous samples on which the ``ARIMA`` estimator was previously fit. This may either be a Pandas ``Series`` object or a numpy array. This should be a one- dimensional array of finite floats. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If the model was fit with an exogenous array of covariates, it will be required for updating the observed values. maxiter : int, optional (default=None) The number of iterations to perform when updating the model. If None, will perform ``max(5, n_samples // 10)`` iterations. **kwargs : keyword args Extra keyword arguments used for each stage's ``update`` stage. Similar to scikit-learn pipeline keyword args, the keys are compound, comprised of the stage name and the argument name separated by a "__". """ check_is_fitted(self, "steps_") # Push the arrays through all of the transformer steps that have the # appropriate update_and_transform method yt = y Xt = exogenous named_kwargs = self._get_kwargs(**kwargs) for step_idx, name, transformer in self._iter(with_final=False): kw = named_kwargs[name] if hasattr(transformer, "update_and_transform"): yt, Xt = transformer.update_and_transform( y=yt, exogenous=Xt, **kw) else: yt, Xt = transformer.transform(yt, exogenous=Xt, **kw) # Now we can update the arima nm, est = self.steps_[-1] return est.update( yt, exogenous=Xt, maxiter=maxiter, **named_kwargs[nm])
python
def update(self, y, exogenous=None, maxiter=None, **kwargs): check_is_fitted(self, "steps_") # Push the arrays through all of the transformer steps that have the # appropriate update_and_transform method yt = y Xt = exogenous named_kwargs = self._get_kwargs(**kwargs) for step_idx, name, transformer in self._iter(with_final=False): kw = named_kwargs[name] if hasattr(transformer, "update_and_transform"): yt, Xt = transformer.update_and_transform( y=yt, exogenous=Xt, **kw) else: yt, Xt = transformer.transform(yt, exogenous=Xt, **kw) # Now we can update the arima nm, est = self.steps_[-1] return est.update( yt, exogenous=Xt, maxiter=maxiter, **named_kwargs[nm])
[ "def", "update", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "maxiter", "=", "None", ",", "*", "*", "kwargs", ")", ":", "check_is_fitted", "(", "self", ",", "\"steps_\"", ")", "# Push the arrays through all of the transformer steps that have the", ...
Update an ARIMA or auto-ARIMA as well as any necessary transformers Passes the newly observed values through the appropriate endog transformations, and the exogenous array through the exog transformers (updating where necessary) before finally updating the ARIMA model. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series data to add to the endogenous samples on which the ``ARIMA`` estimator was previously fit. This may either be a Pandas ``Series`` object or a numpy array. This should be a one- dimensional array of finite floats. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If the model was fit with an exogenous array of covariates, it will be required for updating the observed values. maxiter : int, optional (default=None) The number of iterations to perform when updating the model. If None, will perform ``max(5, n_samples // 10)`` iterations. **kwargs : keyword args Extra keyword arguments used for each stage's ``update`` stage. Similar to scikit-learn pipeline keyword args, the keys are compound, comprised of the stage name and the argument name separated by a "__".
[ "Update", "an", "ARIMA", "or", "auto", "-", "ARIMA", "as", "well", "as", "any", "necessary", "transformers" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/pipeline.py#L273-L322
226,755
tgsmith61591/pmdarima
pmdarima/datasets/wineind.py
load_wineind
def load_wineind(as_series=False): """Australian total wine sales by wine makers in bottles <= 1 litre. This time-series records wine sales by Australian wine makers between Jan 1980 -- Aug 1994. This dataset is found in the R ``forecast`` package. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If True, the index will be set to the observed years/months. If False, will return a 1d numpy array. Notes ----- This is monthly data, so *m* should be set to 12 when using in a seasonal context. Examples -------- >>> from pmdarima.datasets import load_wineind >>> load_wineind() array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133, 22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125, 25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533, 23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386, 16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475, 23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191, 27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488, 22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351, 19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689, 26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001, 33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562, 27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379, 16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166, 26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316, 27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198, 19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687, 16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156, 25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735, 29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565, 26323, 23779, 27549, 29660, 23356]) >>> load_wineind(True).head() Jan 1980 15136 Feb 1980 16733 Mar 1980 20016 Apr 1980 17708 May 1980 18019 dtype: int64 References 
---------- .. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/wineind # noqa: E501 Returns ------- rslt : array-like, shape=(n_samples,) The wineind dataset. There are 176 observations. """ rslt = np.array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133, 22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125, 25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533, 23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386, 16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475, 23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191, 27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488, 22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351, 19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689, 26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001, 33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562, 27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379, 16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166, 26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316, 27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198, 19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687, 16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156, 25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735, 29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565, 26323, 23779, 27549, 29660, 23356]) if not as_series: return rslt # Otherwise we want a series and have to cleverly create the index # (we don't want after aug in 1994, so trip Sep, Oct, Nov and Dec) index = [ "%s %i" % (calendar.month_abbr[i + 1], year) for year in range(1980, 1995) for i in range(12) ][:-4] return pd.Series(rslt, index=index)
python
def load_wineind(as_series=False): rslt = np.array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133, 22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125, 25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533, 23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386, 16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475, 23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191, 27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488, 22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351, 19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689, 26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001, 33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562, 27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379, 16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166, 26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316, 27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198, 19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687, 16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156, 25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735, 29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565, 26323, 23779, 27549, 29660, 23356]) if not as_series: return rslt # Otherwise we want a series and have to cleverly create the index # (we don't want after aug in 1994, so trip Sep, Oct, Nov and Dec) index = [ "%s %i" % (calendar.month_abbr[i + 1], year) for year in range(1980, 1995) for i in range(12) ][:-4] return pd.Series(rslt, index=index)
[ "def", "load_wineind", "(", "as_series", "=", "False", ")", ":", "rslt", "=", "np", ".", "array", "(", "[", "15136", ",", "16733", ",", "20016", ",", "17708", ",", "18019", ",", "19227", ",", "22893", ",", "23739", ",", "21133", ",", "22591", ",", ...
Australian total wine sales by wine makers in bottles <= 1 litre. This time-series records wine sales by Australian wine makers between Jan 1980 -- Aug 1994. This dataset is found in the R ``forecast`` package. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If True, the index will be set to the observed years/months. If False, will return a 1d numpy array. Notes ----- This is monthly data, so *m* should be set to 12 when using in a seasonal context. Examples -------- >>> from pmdarima.datasets import load_wineind >>> load_wineind() array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133, 22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125, 25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533, 23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386, 16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475, 23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191, 27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488, 22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351, 19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689, 26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001, 33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562, 27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379, 16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166, 26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316, 27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198, 19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687, 16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156, 25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735, 29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565, 26323, 23779, 27549, 29660, 23356]) >>> load_wineind(True).head() Jan 1980 15136 Feb 1980 16733 Mar 1980 20016 Apr 1980 17708 May 1980 18019 dtype: int64 References ---------- .. 
[1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/wineind # noqa: E501 Returns ------- rslt : array-like, shape=(n_samples,) The wineind dataset. There are 176 observations.
[ "Australian", "total", "wine", "sales", "by", "wine", "makers", "in", "bottles", "<", "=", "1", "litre", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/datasets/wineind.py#L19-L112
226,756
tgsmith61591/pmdarima
pmdarima/preprocessing/exog/fourier.py
FourierFeaturizer.transform
def transform(self, y, exogenous=None, n_periods=0, **_): """Create Fourier term features When an ARIMA is fit with an exogenous array, it must be forecasted with one also. Since at ``predict`` time in a pipeline we won't have ``y`` (and we may not yet have an ``exog`` array), we have to know how far into the future for which to compute Fourier terms (hence ``n_periods``). This method will compute the Fourier features for a given frequency and ``k`` term. Note that the ``y`` values are not used to compute these, so this does not pose a risk of data leakage. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. This is unused and technically optional for the Fourier terms, since it uses the pre-computed ``n`` to calculate the seasonal Fourier terms. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. If specified, the Fourier terms will be column-bound on the right side of the matrix. Otherwise, the Fourier terms will be returned as the new exogenous array. n_periods : int, optional (default=0) The number of periods in the future to forecast. If ``n_periods`` is 0, will compute the Fourier features for the training set. ``n_periods`` corresponds to the number of samples that will be returned. """ check_is_fitted(self, "p_") _, exog = self._check_y_exog(y, exogenous, null_allowed=True) if n_periods and exog is not None: if n_periods != exog.shape[0]: raise ValueError("If n_periods and exog are specified, " "n_periods must match dims of exogenous") times = np.arange(self.n_ + n_periods, dtype=np.float64) + 1 X_fourier = _fourier_terms(self.p_, times) # Maybe trim if we're in predict mode... in that case, we only keep the # last n_periods rows in the matrix we've created if n_periods: X_fourier = X_fourier[-n_periods:, :] if exog is None: exog = X_fourier else: exog = np.hstack([exog, X_fourier]) return y, exog
python
def transform(self, y, exogenous=None, n_periods=0, **_): check_is_fitted(self, "p_") _, exog = self._check_y_exog(y, exogenous, null_allowed=True) if n_periods and exog is not None: if n_periods != exog.shape[0]: raise ValueError("If n_periods and exog are specified, " "n_periods must match dims of exogenous") times = np.arange(self.n_ + n_periods, dtype=np.float64) + 1 X_fourier = _fourier_terms(self.p_, times) # Maybe trim if we're in predict mode... in that case, we only keep the # last n_periods rows in the matrix we've created if n_periods: X_fourier = X_fourier[-n_periods:, :] if exog is None: exog = X_fourier else: exog = np.hstack([exog, X_fourier]) return y, exog
[ "def", "transform", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "n_periods", "=", "0", ",", "*", "*", "_", ")", ":", "check_is_fitted", "(", "self", ",", "\"p_\"", ")", "_", ",", "exog", "=", "self", ".", "_check_y_exog", "(", "y", ...
Create Fourier term features When an ARIMA is fit with an exogenous array, it must be forecasted with one also. Since at ``predict`` time in a pipeline we won't have ``y`` (and we may not yet have an ``exog`` array), we have to know how far into the future for which to compute Fourier terms (hence ``n_periods``). This method will compute the Fourier features for a given frequency and ``k`` term. Note that the ``y`` values are not used to compute these, so this does not pose a risk of data leakage. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. This is unused and technically optional for the Fourier terms, since it uses the pre-computed ``n`` to calculate the seasonal Fourier terms. exogenous : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. If specified, the Fourier terms will be column-bound on the right side of the matrix. Otherwise, the Fourier terms will be returned as the new exogenous array. n_periods : int, optional (default=0) The number of periods in the future to forecast. If ``n_periods`` is 0, will compute the Fourier features for the training set. ``n_periods`` corresponds to the number of samples that will be returned.
[ "Create", "Fourier", "term", "features" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/preprocessing/exog/fourier.py#L120-L173
226,757
tgsmith61591/pmdarima
pmdarima/preprocessing/exog/fourier.py
FourierFeaturizer.update_and_transform
def update_and_transform(self, y, exogenous, **kwargs): """Update the params and return the transformed arrays Since no parameters really get updated in the Fourier featurizer, all we do is compose forecasts for ``n_periods=len(y)`` and then update ``n_``. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features) The exogenous array of additional covariates. **kwargs : keyword args Keyword arguments required by the transform function. """ check_is_fitted(self, "p_") self._check_endog(y) _, Xt = self.transform(y, exogenous, n_periods=len(y), **kwargs) # Update this *after* getting the exog features self.n_ += len(y) return y, Xt
python
def update_and_transform(self, y, exogenous, **kwargs): check_is_fitted(self, "p_") self._check_endog(y) _, Xt = self.transform(y, exogenous, n_periods=len(y), **kwargs) # Update this *after* getting the exog features self.n_ += len(y) return y, Xt
[ "def", "update_and_transform", "(", "self", ",", "y", ",", "exogenous", ",", "*", "*", "kwargs", ")", ":", "check_is_fitted", "(", "self", ",", "\"p_\"", ")", "self", ".", "_check_endog", "(", "y", ")", "_", ",", "Xt", "=", "self", ".", "transform", ...
Update the params and return the transformed arrays Since no parameters really get updated in the Fourier featurizer, all we do is compose forecasts for ``n_periods=len(y)`` and then update ``n_``. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. exogenous : array-like or None, shape=(n_samples, n_features) The exogenous array of additional covariates. **kwargs : keyword args Keyword arguments required by the transform function.
[ "Update", "the", "params", "and", "return", "the", "transformed", "arrays" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/preprocessing/exog/fourier.py#L175-L200
226,758
tgsmith61591/pmdarima
pmdarima/base.py
BaseARIMA.fit_predict
def fit_predict(self, y, exogenous=None, n_periods=10, **fit_args): """Fit an ARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables, and then generate predictions. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. n_periods : int, optional (default=10) The number of periods in the future to forecast. fit_args : dict or kwargs, optional (default=None) Any keyword args to pass to the fit method. """ self.fit(y, exogenous, **fit_args) return self.predict(n_periods=n_periods, exogenous=exogenous)
python
def fit_predict(self, y, exogenous=None, n_periods=10, **fit_args): self.fit(y, exogenous, **fit_args) return self.predict(n_periods=n_periods, exogenous=exogenous)
[ "def", "fit_predict", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "n_periods", "=", "10", ",", "*", "*", "fit_args", ")", ":", "self", ".", "fit", "(", "y", ",", "exogenous", ",", "*", "*", "fit_args", ")", "return", "self", ".", "...
Fit an ARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables, and then generate predictions. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. n_periods : int, optional (default=10) The number of periods in the future to forecast. fit_args : dict or kwargs, optional (default=None) Any keyword args to pass to the fit method.
[ "Fit", "an", "ARIMA", "to", "a", "vector", "y", "of", "observations", "with", "an", "optional", "matrix", "of", "exogenous", "variables", "and", "then", "generate", "predictions", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/base.py#L19-L47
226,759
tgsmith61591/pmdarima
pmdarima/utils/wrapped.py
inheritdoc
def inheritdoc(parent): """Inherit documentation from a parent Parameters ---------- parent : callable The parent function or class that contains the sought-after docstring. If it doesn't have a docstring, this might behave in unexpected ways. Examples -------- >>> def a(x=1): ... '''This is documentation''' ... return x ... >>> @inheritdoc(a) ... def b(x): ... return 2 * a(x) ... >>> print(b.__doc__) This is documentation >>> print(b(2)) 4 """ def wrapper(func): # Assign the parent docstring to the child func.__doc__ = parent.__doc__ @wraps(func) def caller(*args, **kwargs): return func(*args, **kwargs) return caller return wrapper
python
def inheritdoc(parent): def wrapper(func): # Assign the parent docstring to the child func.__doc__ = parent.__doc__ @wraps(func) def caller(*args, **kwargs): return func(*args, **kwargs) return caller return wrapper
[ "def", "inheritdoc", "(", "parent", ")", ":", "def", "wrapper", "(", "func", ")", ":", "# Assign the parent docstring to the child", "func", ".", "__doc__", "=", "parent", ".", "__doc__", "@", "wraps", "(", "func", ")", "def", "caller", "(", "*", "args", "...
Inherit documentation from a parent Parameters ---------- parent : callable The parent function or class that contains the sought-after docstring. If it doesn't have a docstring, this might behave in unexpected ways. Examples -------- >>> def a(x=1): ... '''This is documentation''' ... return x ... >>> @inheritdoc(a) ... def b(x): ... return 2 * a(x) ... >>> print(b.__doc__) This is documentation >>> print(b(2)) 4
[ "Inherit", "documentation", "from", "a", "parent" ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/utils/wrapped.py#L19-L53
226,760
tgsmith61591/pmdarima
pmdarima/datasets/austres.py
load_austres
def load_austres(as_series=False): """Quarterly residential data. Numbers (in thousands) of Australian residents measured quarterly from March 1971 to March 1994. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The austres vector. Examples -------- >>> from pmdarima.datasets import load_austres >>> load_austres() np.array([13067.3, 13130.5, 13198.4, 13254.2, 13303.7, 13353.9, 13409.3, 13459.2, 13504.5, 13552.6, 13614.3, 13669.5, 13722.6, 13772.1, 13832.0, 13862.6, 13893.0, 13926.8, 13968.9, 14004.7, 14033.1, 14066.0, 14110.1, 14155.6, 14192.2, 14231.7, 14281.5, 14330.3, 14359.3, 14396.6, 14430.8, 14478.4, 14515.7, 14554.9, 14602.5, 14646.4, 14695.4, 14746.6, 14807.4, 14874.4, 14923.3, 14988.7, 15054.1, 15121.7, 15184.2, 15239.3, 15288.9, 15346.2, 15393.5, 15439.0, 15483.5, 15531.5, 15579.4, 15628.5, 15677.3, 15736.7, 15788.3, 15839.7, 15900.6, 15961.5, 16018.3, 16076.9, 16139.0, 16203.0, 16263.3, 16327.9, 16398.9, 16478.3, 16538.2, 16621.6, 16697.0, 16777.2, 16833.1, 16891.6, 16956.8, 17026.3, 17085.4, 17106.9, 17169.4, 17239.4, 17292.0, 17354.2, 17414.2, 17447.3, 17482.6, 17526.0, 17568.7, 17627.1, 17661.5]) >>> load_austres(True).head() 0 13067.3 1 13130.5 2 13198.4 3 13254.2 4 13303.7 dtype: float64 Notes ----- This is quarterly data, so *m* should be set to 4 when using in a seasonal context. References ---------- .. [1] P. J. Brockwell and R. A. Davis (1996) "Introduction to Time Series and Forecasting." 
Springer """ rslt = np.array([13067.3, 13130.5, 13198.4, 13254.2, 13303.7, 13353.9, 13409.3, 13459.2, 13504.5, 13552.6, 13614.3, 13669.5, 13722.6, 13772.1, 13832.0, 13862.6, 13893.0, 13926.8, 13968.9, 14004.7, 14033.1, 14066.0, 14110.1, 14155.6, 14192.2, 14231.7, 14281.5, 14330.3, 14359.3, 14396.6, 14430.8, 14478.4, 14515.7, 14554.9, 14602.5, 14646.4, 14695.4, 14746.6, 14807.4, 14874.4, 14923.3, 14988.7, 15054.1, 15121.7, 15184.2, 15239.3, 15288.9, 15346.2, 15393.5, 15439.0, 15483.5, 15531.5, 15579.4, 15628.5, 15677.3, 15736.7, 15788.3, 15839.7, 15900.6, 15961.5, 16018.3, 16076.9, 16139.0, 16203.0, 16263.3, 16327.9, 16398.9, 16478.3, 16538.2, 16621.6, 16697.0, 16777.2, 16833.1, 16891.6, 16956.8, 17026.3, 17085.4, 17106.9, 17169.4, 17239.4, 17292.0, 17354.2, 17414.2, 17447.3, 17482.6, 17526.0, 17568.7, 17627.1, 17661.5]) if as_series: return pd.Series(rslt) return rslt
python
def load_austres(as_series=False): rslt = np.array([13067.3, 13130.5, 13198.4, 13254.2, 13303.7, 13353.9, 13409.3, 13459.2, 13504.5, 13552.6, 13614.3, 13669.5, 13722.6, 13772.1, 13832.0, 13862.6, 13893.0, 13926.8, 13968.9, 14004.7, 14033.1, 14066.0, 14110.1, 14155.6, 14192.2, 14231.7, 14281.5, 14330.3, 14359.3, 14396.6, 14430.8, 14478.4, 14515.7, 14554.9, 14602.5, 14646.4, 14695.4, 14746.6, 14807.4, 14874.4, 14923.3, 14988.7, 15054.1, 15121.7, 15184.2, 15239.3, 15288.9, 15346.2, 15393.5, 15439.0, 15483.5, 15531.5, 15579.4, 15628.5, 15677.3, 15736.7, 15788.3, 15839.7, 15900.6, 15961.5, 16018.3, 16076.9, 16139.0, 16203.0, 16263.3, 16327.9, 16398.9, 16478.3, 16538.2, 16621.6, 16697.0, 16777.2, 16833.1, 16891.6, 16956.8, 17026.3, 17085.4, 17106.9, 17169.4, 17239.4, 17292.0, 17354.2, 17414.2, 17447.3, 17482.6, 17526.0, 17568.7, 17627.1, 17661.5]) if as_series: return pd.Series(rslt) return rslt
[ "def", "load_austres", "(", "as_series", "=", "False", ")", ":", "rslt", "=", "np", ".", "array", "(", "[", "13067.3", ",", "13130.5", ",", "13198.4", ",", "13254.2", ",", "13303.7", ",", "13353.9", ",", "13409.3", ",", "13459.2", ",", "13504.5", ",", ...
Quarterly residential data. Numbers (in thousands) of Australian residents measured quarterly from March 1971 to March 1994. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The austres vector. Examples -------- >>> from pmdarima.datasets import load_austres >>> load_austres() np.array([13067.3, 13130.5, 13198.4, 13254.2, 13303.7, 13353.9, 13409.3, 13459.2, 13504.5, 13552.6, 13614.3, 13669.5, 13722.6, 13772.1, 13832.0, 13862.6, 13893.0, 13926.8, 13968.9, 14004.7, 14033.1, 14066.0, 14110.1, 14155.6, 14192.2, 14231.7, 14281.5, 14330.3, 14359.3, 14396.6, 14430.8, 14478.4, 14515.7, 14554.9, 14602.5, 14646.4, 14695.4, 14746.6, 14807.4, 14874.4, 14923.3, 14988.7, 15054.1, 15121.7, 15184.2, 15239.3, 15288.9, 15346.2, 15393.5, 15439.0, 15483.5, 15531.5, 15579.4, 15628.5, 15677.3, 15736.7, 15788.3, 15839.7, 15900.6, 15961.5, 16018.3, 16076.9, 16139.0, 16203.0, 16263.3, 16327.9, 16398.9, 16478.3, 16538.2, 16621.6, 16697.0, 16777.2, 16833.1, 16891.6, 16956.8, 17026.3, 17085.4, 17106.9, 17169.4, 17239.4, 17292.0, 17354.2, 17414.2, 17447.3, 17482.6, 17526.0, 17568.7, 17627.1, 17661.5]) >>> load_austres(True).head() 0 13067.3 1 13130.5 2 13198.4 3 13254.2 4 13303.7 dtype: float64 Notes ----- This is quarterly data, so *m* should be set to 4 when using in a seasonal context. References ---------- .. [1] P. J. Brockwell and R. A. Davis (1996) "Introduction to Time Series and Forecasting." Springer
[ "Quarterly", "residential", "data", "." ]
a133de78ba5bd68da9785b061f519ba28cd514cc
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/datasets/austres.py#L13-L86
226,761
pixiedust/pixiedust
pixiedust/utils/sparkJobProgressMonitor.py
SparkJobProgressMonitorOutput.display_with_id
def display_with_id(self, obj, display_id, update=False): """Create a new display with an id""" ip = get_ipython() if hasattr(ip, "kernel"): data, md = ip.display_formatter.format(obj) content = { 'data': data, 'metadata': md, 'transient': {'display_id': display_id}, } msg_type = 'update_display_data' if update else 'display_data' ip.kernel.session.send(ip.kernel.iopub_socket, msg_type, content, parent=ip.parent_header) else: display(obj)
python
def display_with_id(self, obj, display_id, update=False): ip = get_ipython() if hasattr(ip, "kernel"): data, md = ip.display_formatter.format(obj) content = { 'data': data, 'metadata': md, 'transient': {'display_id': display_id}, } msg_type = 'update_display_data' if update else 'display_data' ip.kernel.session.send(ip.kernel.iopub_socket, msg_type, content, parent=ip.parent_header) else: display(obj)
[ "def", "display_with_id", "(", "self", ",", "obj", ",", "display_id", ",", "update", "=", "False", ")", ":", "ip", "=", "get_ipython", "(", ")", "if", "hasattr", "(", "ip", ",", "\"kernel\"", ")", ":", "data", ",", "md", "=", "ip", ".", "display_form...
Create a new display with an id
[ "Create", "a", "new", "display", "with", "an", "id" ]
fd4596455fffc28d0de2f5ebc8da36ec30419bee
https://github.com/pixiedust/pixiedust/blob/fd4596455fffc28d0de2f5ebc8da36ec30419bee/pixiedust/utils/sparkJobProgressMonitor.py#L116-L129
226,762
pixiedust/pixiedust
pixiedust/utils/astParse.py
get_caller_text
def get_caller_text(frame): """ Return the expression that calls the frame """ def find_match_node(node): "Find a candidate ast node" match_node = None for chd in ast.iter_child_nodes(node): if getattr(chd, "lineno", 0) > frame.f_back.f_lineno: break match_node = node if isinstance(chd, ast.Name) and isinstance(node, ast.Call) else match_node match_node = find_match_node(chd) or match_node return match_node lines, _ = inspect.findsource(frame.f_back.f_code) match_node = find_match_node(ast.parse("".join(lines))) return unparse(match_node).strip().replace(', ', ',') if match_node is not None else None
python
def get_caller_text(frame): def find_match_node(node): "Find a candidate ast node" match_node = None for chd in ast.iter_child_nodes(node): if getattr(chd, "lineno", 0) > frame.f_back.f_lineno: break match_node = node if isinstance(chd, ast.Name) and isinstance(node, ast.Call) else match_node match_node = find_match_node(chd) or match_node return match_node lines, _ = inspect.findsource(frame.f_back.f_code) match_node = find_match_node(ast.parse("".join(lines))) return unparse(match_node).strip().replace(', ', ',') if match_node is not None else None
[ "def", "get_caller_text", "(", "frame", ")", ":", "def", "find_match_node", "(", "node", ")", ":", "\"Find a candidate ast node\"", "match_node", "=", "None", "for", "chd", "in", "ast", ".", "iter_child_nodes", "(", "node", ")", ":", "if", "getattr", "(", "c...
Return the expression that calls the frame
[ "Return", "the", "expression", "that", "calls", "the", "frame" ]
fd4596455fffc28d0de2f5ebc8da36ec30419bee
https://github.com/pixiedust/pixiedust/blob/fd4596455fffc28d0de2f5ebc8da36ec30419bee/pixiedust/utils/astParse.py#L21-L36
226,763
pixiedust/pixiedust
pixiedust/utils/astParse.py
get_matches_lineno
def get_matches_lineno(code, fn_name): "Return a list of line number corresponding to the definition of function with the name fn_name" class Walker(ast.NodeVisitor): def __init__(self): self._hits = set() #pylint: disable=E0213,E1102 def onvisit(fn): def wrap(self, node): fn(self,node) super(Walker, self).generic_visit(node) return wrap @onvisit def visit_FunctionDef(self, node): if node.name == fn_name: self._hits.add(node) return node @property def hits(self): return list(sorted([n.lineno + 1 for n in self._hits])) walker = Walker() walker.visit(ast.parse(code)) return walker.hits
python
def get_matches_lineno(code, fn_name): "Return a list of line number corresponding to the definition of function with the name fn_name" class Walker(ast.NodeVisitor): def __init__(self): self._hits = set() #pylint: disable=E0213,E1102 def onvisit(fn): def wrap(self, node): fn(self,node) super(Walker, self).generic_visit(node) return wrap @onvisit def visit_FunctionDef(self, node): if node.name == fn_name: self._hits.add(node) return node @property def hits(self): return list(sorted([n.lineno + 1 for n in self._hits])) walker = Walker() walker.visit(ast.parse(code)) return walker.hits
[ "def", "get_matches_lineno", "(", "code", ",", "fn_name", ")", ":", "class", "Walker", "(", "ast", ".", "NodeVisitor", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_hits", "=", "set", "(", ")", "#pylint: disable=E0213,E1102 ", "def"...
Return a list of line number corresponding to the definition of function with the name fn_name
[ "Return", "a", "list", "of", "line", "number", "corresponding", "to", "the", "definition", "of", "function", "with", "the", "name", "fn_name" ]
fd4596455fffc28d0de2f5ebc8da36ec30419bee
https://github.com/pixiedust/pixiedust/blob/fd4596455fffc28d0de2f5ebc8da36ec30419bee/pixiedust/utils/astParse.py#L78-L103
226,764
pndurette/gTTS
gtts/lang.py
tts_langs
def tts_langs(): """Languages Google Text-to-Speech supports. Returns: dict: A dictionnary of the type `{ '<lang>': '<name>'}` Where `<lang>` is an IETF language tag such as `en` or `pt-br`, and `<name>` is the full English name of the language, such as `English` or `Portuguese (Brazil)`. The dictionnary returned combines languages from two origins: - Languages fetched automatically from Google Translate - Languages that are undocumented variations that were observed to work and present different dialects or accents. """ try: langs = dict() langs.update(_fetch_langs()) langs.update(_extra_langs()) log.debug("langs: %s", langs) return langs except Exception as e: raise RuntimeError("Unable to get language list: %s" % str(e))
python
def tts_langs(): try: langs = dict() langs.update(_fetch_langs()) langs.update(_extra_langs()) log.debug("langs: %s", langs) return langs except Exception as e: raise RuntimeError("Unable to get language list: %s" % str(e))
[ "def", "tts_langs", "(", ")", ":", "try", ":", "langs", "=", "dict", "(", ")", "langs", ".", "update", "(", "_fetch_langs", "(", ")", ")", "langs", ".", "update", "(", "_extra_langs", "(", ")", ")", "log", ".", "debug", "(", "\"langs: %s\"", ",", "...
Languages Google Text-to-Speech supports. Returns: dict: A dictionnary of the type `{ '<lang>': '<name>'}` Where `<lang>` is an IETF language tag such as `en` or `pt-br`, and `<name>` is the full English name of the language, such as `English` or `Portuguese (Brazil)`. The dictionnary returned combines languages from two origins: - Languages fetched automatically from Google Translate - Languages that are undocumented variations that were observed to work and present different dialects or accents.
[ "Languages", "Google", "Text", "-", "to", "-", "Speech", "supports", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/lang.py#L17-L41
226,765
pndurette/gTTS
gtts/tokenizer/pre_processors.py
tone_marks
def tone_marks(text): """Add a space after tone-modifying punctuation. Because the `tone_marks` tokenizer case will split after a tone-modidfying punctuation mark, make sure there's whitespace after. """ return PreProcessorRegex( search_args=symbols.TONE_MARKS, search_func=lambda x: u"(?<={})".format(x), repl=' ').run(text)
python
def tone_marks(text): return PreProcessorRegex( search_args=symbols.TONE_MARKS, search_func=lambda x: u"(?<={})".format(x), repl=' ').run(text)
[ "def", "tone_marks", "(", "text", ")", ":", "return", "PreProcessorRegex", "(", "search_args", "=", "symbols", ".", "TONE_MARKS", ",", "search_func", "=", "lambda", "x", ":", "u\"(?<={})\"", ".", "format", "(", "x", ")", ",", "repl", "=", "' '", ")", "."...
Add a space after tone-modifying punctuation. Because the `tone_marks` tokenizer case will split after a tone-modidfying punctuation mark, make sure there's whitespace after.
[ "Add", "a", "space", "after", "tone", "-", "modifying", "punctuation", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/pre_processors.py#L6-L16
226,766
pndurette/gTTS
gtts/tokenizer/pre_processors.py
end_of_line
def end_of_line(text): """Re-form words cut by end-of-line hyphens. Remove "<hyphen><newline>". """ return PreProcessorRegex( search_args=u'-', search_func=lambda x: u"{}\n".format(x), repl='').run(text)
python
def end_of_line(text): return PreProcessorRegex( search_args=u'-', search_func=lambda x: u"{}\n".format(x), repl='').run(text)
[ "def", "end_of_line", "(", "text", ")", ":", "return", "PreProcessorRegex", "(", "search_args", "=", "u'-'", ",", "search_func", "=", "lambda", "x", ":", "u\"{}\\n\"", ".", "format", "(", "x", ")", ",", "repl", "=", "''", ")", ".", "run", "(", "text", ...
Re-form words cut by end-of-line hyphens. Remove "<hyphen><newline>".
[ "Re", "-", "form", "words", "cut", "by", "end", "-", "of", "-", "line", "hyphens", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/pre_processors.py#L19-L28
226,767
pndurette/gTTS
gtts/tokenizer/pre_processors.py
abbreviations
def abbreviations(text): """Remove periods after an abbreviation from a list of known abbrevations that can be spoken the same without that period. This prevents having to handle tokenization of that period. Note: Could potentially remove the ending period of a sentence. Note: Abbreviations that Google Translate can't pronounce without (or even with) a period should be added as a word substitution with a :class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'. """ return PreProcessorRegex( search_args=symbols.ABBREVIATIONS, search_func=lambda x: r"(?<={})(?=\.).".format(x), repl='', flags=re.IGNORECASE).run(text)
python
def abbreviations(text): return PreProcessorRegex( search_args=symbols.ABBREVIATIONS, search_func=lambda x: r"(?<={})(?=\.).".format(x), repl='', flags=re.IGNORECASE).run(text)
[ "def", "abbreviations", "(", "text", ")", ":", "return", "PreProcessorRegex", "(", "search_args", "=", "symbols", ".", "ABBREVIATIONS", ",", "search_func", "=", "lambda", "x", ":", "r\"(?<={})(?=\\.).\"", ".", "format", "(", "x", ")", ",", "repl", "=", "''",...
Remove periods after an abbreviation from a list of known abbrevations that can be spoken the same without that period. This prevents having to handle tokenization of that period. Note: Could potentially remove the ending period of a sentence. Note: Abbreviations that Google Translate can't pronounce without (or even with) a period should be added as a word substitution with a :class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
[ "Remove", "periods", "after", "an", "abbreviation", "from", "a", "list", "of", "known", "abbrevations", "that", "can", "be", "spoken", "the", "same", "without", "that", "period", ".", "This", "prevents", "having", "to", "handle", "tokenization", "of", "that", ...
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/pre_processors.py#L31-L48
226,768
pndurette/gTTS
gtts/tokenizer/tokenizer_cases.py
tone_marks
def tone_marks(): """Keep tone-modifying punctuation by matching following character. Assumes the `tone_marks` pre-processor was run for cases where there might not be any space after a tone-modifying punctuation mark. """ return RegexBuilder( pattern_args=symbols.TONE_MARKS, pattern_func=lambda x: u"(?<={}).".format(x)).regex
python
def tone_marks(): return RegexBuilder( pattern_args=symbols.TONE_MARKS, pattern_func=lambda x: u"(?<={}).".format(x)).regex
[ "def", "tone_marks", "(", ")", ":", "return", "RegexBuilder", "(", "pattern_args", "=", "symbols", ".", "TONE_MARKS", ",", "pattern_func", "=", "lambda", "x", ":", "u\"(?<={}).\"", ".", "format", "(", "x", ")", ")", ".", "regex" ]
Keep tone-modifying punctuation by matching following character. Assumes the `tone_marks` pre-processor was run for cases where there might not be any space after a tone-modifying punctuation mark.
[ "Keep", "tone", "-", "modifying", "punctuation", "by", "matching", "following", "character", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L5-L13
226,769
pndurette/gTTS
gtts/tokenizer/tokenizer_cases.py
period_comma
def period_comma(): """Period and comma case. Match if not preceded by ".<letter>" and only if followed by space. Won't cut in the middle/after dotted abbreviations; won't cut numbers. Note: Won't match if a dotted abbreviation ends a sentence. Note: Won't match the end of a sentence if not followed by a space. """ return RegexBuilder( pattern_args=symbols.PERIOD_COMMA, pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex
python
def period_comma(): return RegexBuilder( pattern_args=symbols.PERIOD_COMMA, pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex
[ "def", "period_comma", "(", ")", ":", "return", "RegexBuilder", "(", "pattern_args", "=", "symbols", ".", "PERIOD_COMMA", ",", "pattern_func", "=", "lambda", "x", ":", "r\"(?<!\\.[a-z]){} \"", ".", "format", "(", "x", ")", ")", ".", "regex" ]
Period and comma case. Match if not preceded by ".<letter>" and only if followed by space. Won't cut in the middle/after dotted abbreviations; won't cut numbers. Note: Won't match if a dotted abbreviation ends a sentence. Note: Won't match the end of a sentence if not followed by a space.
[ "Period", "and", "comma", "case", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L16-L31
226,770
pndurette/gTTS
gtts/tokenizer/tokenizer_cases.py
colon
def colon(): """Colon case. Match a colon ":" only if not preceeded by a digit. Mainly to prevent a cut in the middle of time notations e.g. 10:01 """ return RegexBuilder( pattern_args=symbols.COLON, pattern_func=lambda x: r"(?<!\d){}".format(x)).regex
python
def colon(): return RegexBuilder( pattern_args=symbols.COLON, pattern_func=lambda x: r"(?<!\d){}".format(x)).regex
[ "def", "colon", "(", ")", ":", "return", "RegexBuilder", "(", "pattern_args", "=", "symbols", ".", "COLON", ",", "pattern_func", "=", "lambda", "x", ":", "r\"(?<!\\d){}\"", ".", "format", "(", "x", ")", ")", ".", "regex" ]
Colon case. Match a colon ":" only if not preceeded by a digit. Mainly to prevent a cut in the middle of time notations e.g. 10:01
[ "Colon", "case", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L34-L43
226,771
pndurette/gTTS
gtts/tokenizer/tokenizer_cases.py
other_punctuation
def other_punctuation(): """Match other punctuation. Match other punctuation to split on; punctuation that naturally inserts a break in speech. """ punc = ''.join( set(symbols.ALL_PUNC) - set(symbols.TONE_MARKS) - set(symbols.PERIOD_COMMA) - set(symbols.COLON)) return RegexBuilder( pattern_args=punc, pattern_func=lambda x: u"{}".format(x)).regex
python
def other_punctuation(): punc = ''.join( set(symbols.ALL_PUNC) - set(symbols.TONE_MARKS) - set(symbols.PERIOD_COMMA) - set(symbols.COLON)) return RegexBuilder( pattern_args=punc, pattern_func=lambda x: u"{}".format(x)).regex
[ "def", "other_punctuation", "(", ")", ":", "punc", "=", "''", ".", "join", "(", "set", "(", "symbols", ".", "ALL_PUNC", ")", "-", "set", "(", "symbols", ".", "TONE_MARKS", ")", "-", "set", "(", "symbols", ".", "PERIOD_COMMA", ")", "-", "set", "(", ...
Match other punctuation. Match other punctuation to split on; punctuation that naturally inserts a break in speech.
[ "Match", "other", "punctuation", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L46-L60
226,772
pndurette/gTTS
gtts/tokenizer/tokenizer_cases.py
legacy_all_punctuation
def legacy_all_punctuation(): # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯ """Match all punctuation. Use as only tokenizer case to mimic gTTS 1.x tokenization. """ punc = symbols.ALL_PUNC return RegexBuilder( pattern_args=punc, pattern_func=lambda x: u"{}".format(x)).regex
python
def legacy_all_punctuation(): # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯ punc = symbols.ALL_PUNC return RegexBuilder( pattern_args=punc, pattern_func=lambda x: u"{}".format(x)).regex
[ "def", "legacy_all_punctuation", "(", ")", ":", "# pragma: no cover b/c tested but Coveralls: ¯\\_(ツ)_/¯", "punc", "=", "symbols", ".", "ALL_PUNC", "return", "RegexBuilder", "(", "pattern_args", "=", "punc", ",", "pattern_func", "=", "lambda", "x", ":", "u\"{}\"", "."...
Match all punctuation. Use as only tokenizer case to mimic gTTS 1.x tokenization.
[ "Match", "all", "punctuation", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L63-L71
226,773
pndurette/gTTS
gtts/tts.py
gTTS.write_to_fp
def write_to_fp(self, fp): """Do the TTS API request and write bytes to a file-like object. Args: fp (file object): Any file-like object to write the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request. TypeError: When ``fp`` is not a file-like object that takes bytes. """ # When disabling ssl verify in requests (for proxies and firewalls), # urllib3 prints an insecure warning on stdout. We disable that. urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) text_parts = self._tokenize(self.text) log.debug("text_parts: %i", len(text_parts)) assert text_parts, 'No text to send to TTS API' for idx, part in enumerate(text_parts): try: # Calculate token part_tk = self.token.calculate_token(part) except requests.exceptions.RequestException as e: # pragma: no cover log.debug(str(e), exc_info=True) raise gTTSError( "Connection error during token calculation: %s" % str(e)) payload = {'ie': 'UTF-8', 'q': part, 'tl': self.lang, 'ttsspeed': self.speed, 'total': len(text_parts), 'idx': idx, 'client': 'tw-ob', 'textlen': _len(part), 'tk': part_tk} log.debug("payload-%i: %s", idx, payload) try: # Request r = requests.get(self.GOOGLE_TTS_URL, params=payload, headers=self.GOOGLE_TTS_HEADERS, proxies=urllib.request.getproxies(), verify=False) log.debug("headers-%i: %s", idx, r.request.headers) log.debug("url-%i: %s", idx, r.request.url) log.debug("status-%i: %s", idx, r.status_code) r.raise_for_status() except requests.exceptions.HTTPError: # Request successful, bad response raise gTTSError(tts=self, response=r) except requests.exceptions.RequestException as e: # pragma: no cover # Request failed raise gTTSError(str(e)) try: # Write for chunk in r.iter_content(chunk_size=1024): fp.write(chunk) log.debug("part-%i written to %s", idx, fp) except (AttributeError, TypeError) as e: raise TypeError( "'fp' is not a file-like object or it does not take bytes: %s" % str(e))
python
def write_to_fp(self, fp): # When disabling ssl verify in requests (for proxies and firewalls), # urllib3 prints an insecure warning on stdout. We disable that. urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) text_parts = self._tokenize(self.text) log.debug("text_parts: %i", len(text_parts)) assert text_parts, 'No text to send to TTS API' for idx, part in enumerate(text_parts): try: # Calculate token part_tk = self.token.calculate_token(part) except requests.exceptions.RequestException as e: # pragma: no cover log.debug(str(e), exc_info=True) raise gTTSError( "Connection error during token calculation: %s" % str(e)) payload = {'ie': 'UTF-8', 'q': part, 'tl': self.lang, 'ttsspeed': self.speed, 'total': len(text_parts), 'idx': idx, 'client': 'tw-ob', 'textlen': _len(part), 'tk': part_tk} log.debug("payload-%i: %s", idx, payload) try: # Request r = requests.get(self.GOOGLE_TTS_URL, params=payload, headers=self.GOOGLE_TTS_HEADERS, proxies=urllib.request.getproxies(), verify=False) log.debug("headers-%i: %s", idx, r.request.headers) log.debug("url-%i: %s", idx, r.request.url) log.debug("status-%i: %s", idx, r.status_code) r.raise_for_status() except requests.exceptions.HTTPError: # Request successful, bad response raise gTTSError(tts=self, response=r) except requests.exceptions.RequestException as e: # pragma: no cover # Request failed raise gTTSError(str(e)) try: # Write for chunk in r.iter_content(chunk_size=1024): fp.write(chunk) log.debug("part-%i written to %s", idx, fp) except (AttributeError, TypeError) as e: raise TypeError( "'fp' is not a file-like object or it does not take bytes: %s" % str(e))
[ "def", "write_to_fp", "(", "self", ",", "fp", ")", ":", "# When disabling ssl verify in requests (for proxies and firewalls),", "# urllib3 prints an insecure warning on stdout. We disable that.", "urllib3", ".", "disable_warnings", "(", "urllib3", ".", "exceptions", ".", "Insecur...
Do the TTS API request and write bytes to a file-like object. Args: fp (file object): Any file-like object to write the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request. TypeError: When ``fp`` is not a file-like object that takes bytes.
[ "Do", "the", "TTS", "API", "request", "and", "write", "bytes", "to", "a", "file", "-", "like", "object", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tts.py#L167-L236
226,774
pndurette/gTTS
gtts/tts.py
gTTS.save
def save(self, savefile): """Do the TTS API request and write result to file. Args: savefile (string): The path and file name to save the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request. """ with open(str(savefile), 'wb') as f: self.write_to_fp(f) log.debug("Saved to %s", savefile)
python
def save(self, savefile): with open(str(savefile), 'wb') as f: self.write_to_fp(f) log.debug("Saved to %s", savefile)
[ "def", "save", "(", "self", ",", "savefile", ")", ":", "with", "open", "(", "str", "(", "savefile", ")", ",", "'wb'", ")", "as", "f", ":", "self", ".", "write_to_fp", "(", "f", ")", "log", ".", "debug", "(", "\"Saved to %s\"", ",", "savefile", ")" ...
Do the TTS API request and write result to file. Args: savefile (string): The path and file name to save the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request.
[ "Do", "the", "TTS", "API", "request", "and", "write", "result", "to", "file", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tts.py#L238-L250
226,775
pndurette/gTTS
gtts/utils.py
_minimize
def _minimize(the_string, delim, max_size): """Recursively split a string in the largest chunks possible from the highest position of a delimiter all the way to a maximum size Args: the_string (string): The string to split. delim (string): The delimiter to split on. max_size (int): The maximum size of a chunk. Returns: list: the minimized string in tokens Every chunk size will be at minimum `the_string[0:idx]` where `idx` is the highest index of `delim` found in `the_string`; and at maximum `the_string[0:max_size]` if no `delim` was found in `the_string`. In the latter case, the split will occur at `the_string[max_size]` which can be any character. The function runs itself again on the rest of `the_string` (`the_string[idx:]`) until no chunk is larger than `max_size`. """ # Remove `delim` from start of `the_string` # i.e. prevent a recursive infinite loop on `the_string[0:0]` # if `the_string` starts with `delim` and is larger than `max_size` if the_string.startswith(delim): the_string = the_string[_len(delim):] if _len(the_string) > max_size: try: # Find the highest index of `delim` in `the_string[0:max_size]` # i.e. `the_string` will be cut in half on `delim` index idx = the_string.rindex(delim, 0, max_size) except ValueError: # `delim` not found in `the_string`, index becomes `max_size` # i.e. `the_string` will be cut in half arbitrarily on `max_size` idx = max_size # Call itself again for `the_string[idx:]` return [the_string[:idx]] + \ _minimize(the_string[idx:], delim, max_size) else: return [the_string]
python
def _minimize(the_string, delim, max_size): # Remove `delim` from start of `the_string` # i.e. prevent a recursive infinite loop on `the_string[0:0]` # if `the_string` starts with `delim` and is larger than `max_size` if the_string.startswith(delim): the_string = the_string[_len(delim):] if _len(the_string) > max_size: try: # Find the highest index of `delim` in `the_string[0:max_size]` # i.e. `the_string` will be cut in half on `delim` index idx = the_string.rindex(delim, 0, max_size) except ValueError: # `delim` not found in `the_string`, index becomes `max_size` # i.e. `the_string` will be cut in half arbitrarily on `max_size` idx = max_size # Call itself again for `the_string[idx:]` return [the_string[:idx]] + \ _minimize(the_string[idx:], delim, max_size) else: return [the_string]
[ "def", "_minimize", "(", "the_string", ",", "delim", ",", "max_size", ")", ":", "# Remove `delim` from start of `the_string`", "# i.e. prevent a recursive infinite loop on `the_string[0:0]`", "# if `the_string` starts with `delim` and is larger than `max_size`", "if", "the_string", "."...
Recursively split a string in the largest chunks possible from the highest position of a delimiter all the way to a maximum size Args: the_string (string): The string to split. delim (string): The delimiter to split on. max_size (int): The maximum size of a chunk. Returns: list: the minimized string in tokens Every chunk size will be at minimum `the_string[0:idx]` where `idx` is the highest index of `delim` found in `the_string`; and at maximum `the_string[0:max_size]` if no `delim` was found in `the_string`. In the latter case, the split will occur at `the_string[max_size]` which can be any character. The function runs itself again on the rest of `the_string` (`the_string[idx:]`) until no chunk is larger than `max_size`.
[ "Recursively", "split", "a", "string", "in", "the", "largest", "chunks", "possible", "from", "the", "highest", "position", "of", "a", "delimiter", "all", "the", "way", "to", "a", "maximum", "size" ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/utils.py#L13-L53
226,776
pndurette/gTTS
gtts/tokenizer/core.py
PreProcessorRegex.run
def run(self, text): """Run each regex substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied. """ for regex in self.regexes: text = regex.sub(self.repl, text) return text
python
def run(self, text): for regex in self.regexes: text = regex.sub(self.repl, text) return text
[ "def", "run", "(", "self", ",", "text", ")", ":", "for", "regex", "in", "self", ".", "regexes", ":", "text", "=", "regex", ".", "sub", "(", "self", ".", "repl", ",", "text", ")", "return", "text" ]
Run each regex substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied.
[ "Run", "each", "regex", "substitution", "on", "text", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/core.py#L127-L140
226,777
pndurette/gTTS
gtts/tokenizer/core.py
PreProcessorSub.run
def run(self, text): """Run each substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied. """ for pp in self.pre_processors: text = pp.run(text) return text
python
def run(self, text): for pp in self.pre_processors: text = pp.run(text) return text
[ "def", "run", "(", "self", ",", "text", ")", ":", "for", "pp", "in", "self", ".", "pre_processors", ":", "text", "=", "pp", ".", "run", "(", "text", ")", "return", "text" ]
Run each substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied.
[ "Run", "each", "substitution", "on", "text", "." ]
b01ac4eb22d40c6241202e202d0418ccf4f98460
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/core.py#L196-L209
226,778
jamesturk/jellyfish
jellyfish/porter.py
Stemmer.vowel_in_stem
def vowel_in_stem(self): """ True iff 0...j contains vowel """ for i in range(0, self.j+1): if not self.cons(i): return True return False
python
def vowel_in_stem(self): for i in range(0, self.j+1): if not self.cons(i): return True return False
[ "def", "vowel_in_stem", "(", "self", ")", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "j", "+", "1", ")", ":", "if", "not", "self", ".", "cons", "(", "i", ")", ":", "return", "True", "return", "False" ]
True iff 0...j contains vowel
[ "True", "iff", "0", "...", "j", "contains", "vowel" ]
699727a6d3ba0ba78a19d70745458d592c140203
https://github.com/jamesturk/jellyfish/blob/699727a6d3ba0ba78a19d70745458d592c140203/jellyfish/porter.py#L95-L100
226,779
jamesturk/jellyfish
jellyfish/porter.py
Stemmer.doublec
def doublec(self, j): """ True iff j, j-1 contains double consonant """ if j < 1 or self.b[j] != self.b[j-1]: return False return self.cons(j)
python
def doublec(self, j): if j < 1 or self.b[j] != self.b[j-1]: return False return self.cons(j)
[ "def", "doublec", "(", "self", ",", "j", ")", ":", "if", "j", "<", "1", "or", "self", ".", "b", "[", "j", "]", "!=", "self", ".", "b", "[", "j", "-", "1", "]", ":", "return", "False", "return", "self", ".", "cons", "(", "j", ")" ]
True iff j, j-1 contains double consonant
[ "True", "iff", "j", "j", "-", "1", "contains", "double", "consonant" ]
699727a6d3ba0ba78a19d70745458d592c140203
https://github.com/jamesturk/jellyfish/blob/699727a6d3ba0ba78a19d70745458d592c140203/jellyfish/porter.py#L102-L106
226,780
jamesturk/jellyfish
jellyfish/porter.py
Stemmer.ends
def ends(self, s): length = len(s) """ True iff 0...k ends with string s """ res = (self.b[self.k-length+1:self.k+1] == s) if res: self.j = self.k - length return res
python
def ends(self, s): length = len(s) res = (self.b[self.k-length+1:self.k+1] == s) if res: self.j = self.k - length return res
[ "def", "ends", "(", "self", ",", "s", ")", ":", "length", "=", "len", "(", "s", ")", "res", "=", "(", "self", ".", "b", "[", "self", ".", "k", "-", "length", "+", "1", ":", "self", ".", "k", "+", "1", "]", "==", "s", ")", "if", "res", "...
True iff 0...k ends with string s
[ "True", "iff", "0", "...", "k", "ends", "with", "string", "s" ]
699727a6d3ba0ba78a19d70745458d592c140203
https://github.com/jamesturk/jellyfish/blob/699727a6d3ba0ba78a19d70745458d592c140203/jellyfish/porter.py#L118-L124
226,781
jamesturk/jellyfish
jellyfish/porter.py
Stemmer.setto
def setto(self, s): """ set j+1...k to string s, readjusting k """ length = len(s) self.b[self.j+1:self.j+1+length] = s self.k = self.j + length
python
def setto(self, s): length = len(s) self.b[self.j+1:self.j+1+length] = s self.k = self.j + length
[ "def", "setto", "(", "self", ",", "s", ")", ":", "length", "=", "len", "(", "s", ")", "self", ".", "b", "[", "self", ".", "j", "+", "1", ":", "self", ".", "j", "+", "1", "+", "length", "]", "=", "s", "self", ".", "k", "=", "self", ".", ...
set j+1...k to string s, readjusting k
[ "set", "j", "+", "1", "...", "k", "to", "string", "s", "readjusting", "k" ]
699727a6d3ba0ba78a19d70745458d592c140203
https://github.com/jamesturk/jellyfish/blob/699727a6d3ba0ba78a19d70745458d592c140203/jellyfish/porter.py#L126-L130
226,782
jamesturk/jellyfish
jellyfish/porter.py
Stemmer.step1c
def step1c(self): """ turn terminal y into i if there's a vowel in stem """ if self.ends(['y']) and self.vowel_in_stem(): self.b[self.k] = 'i'
python
def step1c(self): if self.ends(['y']) and self.vowel_in_stem(): self.b[self.k] = 'i'
[ "def", "step1c", "(", "self", ")", ":", "if", "self", ".", "ends", "(", "[", "'y'", "]", ")", "and", "self", ".", "vowel_in_stem", "(", ")", ":", "self", ".", "b", "[", "self", ".", "k", "]", "=", "'i'" ]
turn terminal y into i if there's a vowel in stem
[ "turn", "terminal", "y", "into", "i", "if", "there", "s", "a", "vowel", "in", "stem" ]
699727a6d3ba0ba78a19d70745458d592c140203
https://github.com/jamesturk/jellyfish/blob/699727a6d3ba0ba78a19d70745458d592c140203/jellyfish/porter.py#L163-L166
226,783
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.setup_layout
def setup_layout(self, board_layout): """ Setup the Pin instances based on the given board layout. """ # Create pin instances based on board layout self.analog = [] for i in board_layout['analog']: self.analog.append(Pin(self, i)) self.digital = [] self.digital_ports = [] for i in range(0, len(board_layout['digital']), 8): num_pins = len(board_layout['digital'][i:i + 8]) port_number = int(i / 8) self.digital_ports.append(Port(self, port_number, num_pins)) # Allow to access the Pin instances directly for port in self.digital_ports: self.digital += port.pins # Setup PWM pins for i in board_layout['pwm']: self.digital[i].PWM_CAPABLE = True # Disable certain ports like Rx/Tx and crystal ports for i in board_layout['disabled']: self.digital[i].mode = UNAVAILABLE # Create a dictionary of 'taken' pins. Used by the get_pin method self.taken = {'analog': dict(map(lambda p: (p.pin_number, False), self.analog)), 'digital': dict(map(lambda p: (p.pin_number, False), self.digital))} self._set_default_handlers()
python
def setup_layout(self, board_layout): # Create pin instances based on board layout self.analog = [] for i in board_layout['analog']: self.analog.append(Pin(self, i)) self.digital = [] self.digital_ports = [] for i in range(0, len(board_layout['digital']), 8): num_pins = len(board_layout['digital'][i:i + 8]) port_number = int(i / 8) self.digital_ports.append(Port(self, port_number, num_pins)) # Allow to access the Pin instances directly for port in self.digital_ports: self.digital += port.pins # Setup PWM pins for i in board_layout['pwm']: self.digital[i].PWM_CAPABLE = True # Disable certain ports like Rx/Tx and crystal ports for i in board_layout['disabled']: self.digital[i].mode = UNAVAILABLE # Create a dictionary of 'taken' pins. Used by the get_pin method self.taken = {'analog': dict(map(lambda p: (p.pin_number, False), self.analog)), 'digital': dict(map(lambda p: (p.pin_number, False), self.digital))} self._set_default_handlers()
[ "def", "setup_layout", "(", "self", ",", "board_layout", ")", ":", "# Create pin instances based on board layout", "self", ".", "analog", "=", "[", "]", "for", "i", "in", "board_layout", "[", "'analog'", "]", ":", "self", ".", "analog", ".", "append", "(", "...
Setup the Pin instances based on the given board layout.
[ "Setup", "the", "Pin", "instances", "based", "on", "the", "given", "board", "layout", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L125-L157
226,784
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.auto_setup
def auto_setup(self): """ Automatic setup based on Firmata's "Capability Query" """ self.add_cmd_handler(CAPABILITY_RESPONSE, self._handle_report_capability_response) self.send_sysex(CAPABILITY_QUERY, []) self.pass_time(0.1) # Serial SYNC while self.bytes_available(): self.iterate() # handle_report_capability_response will write self._layout if self._layout: self.setup_layout(self._layout) else: raise IOError("Board detection failed.")
python
def auto_setup(self): self.add_cmd_handler(CAPABILITY_RESPONSE, self._handle_report_capability_response) self.send_sysex(CAPABILITY_QUERY, []) self.pass_time(0.1) # Serial SYNC while self.bytes_available(): self.iterate() # handle_report_capability_response will write self._layout if self._layout: self.setup_layout(self._layout) else: raise IOError("Board detection failed.")
[ "def", "auto_setup", "(", "self", ")", ":", "self", ".", "add_cmd_handler", "(", "CAPABILITY_RESPONSE", ",", "self", ".", "_handle_report_capability_response", ")", "self", ".", "send_sysex", "(", "CAPABILITY_QUERY", ",", "[", "]", ")", "self", ".", "pass_time",...
Automatic setup based on Firmata's "Capability Query"
[ "Automatic", "setup", "based", "on", "Firmata", "s", "Capability", "Query" ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L166-L181
226,785
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.add_cmd_handler
def add_cmd_handler(self, cmd, func): """Adds a command handler for a command.""" len_args = len(inspect.getargspec(func)[0]) def add_meta(f): def decorator(*args, **kwargs): f(*args, **kwargs) decorator.bytes_needed = len_args - 1 # exclude self decorator.__name__ = f.__name__ return decorator func = add_meta(func) self._command_handlers[cmd] = func
python
def add_cmd_handler(self, cmd, func): len_args = len(inspect.getargspec(func)[0]) def add_meta(f): def decorator(*args, **kwargs): f(*args, **kwargs) decorator.bytes_needed = len_args - 1 # exclude self decorator.__name__ = f.__name__ return decorator func = add_meta(func) self._command_handlers[cmd] = func
[ "def", "add_cmd_handler", "(", "self", ",", "cmd", ",", "func", ")", ":", "len_args", "=", "len", "(", "inspect", ".", "getargspec", "(", "func", ")", "[", "0", "]", ")", "def", "add_meta", "(", "f", ")", ":", "def", "decorator", "(", "*", "args", ...
Adds a command handler for a command.
[ "Adds", "a", "command", "handler", "for", "a", "command", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L183-L194
226,786
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.get_pin
def get_pin(self, pin_def): """ Returns the activated pin given by the pin definition. May raise an ``InvalidPinDefError`` or a ``PinAlreadyTakenError``. :arg pin_def: Pin definition as described below, but without the arduino name. So for example ``a:1:i``. 'a' analog pin Pin number 'i' for input 'd' digital pin Pin number 'o' for output 'p' for pwm (Pulse-width modulation) All seperated by ``:``. """ if type(pin_def) == list: bits = pin_def else: bits = pin_def.split(':') a_d = bits[0] == 'a' and 'analog' or 'digital' part = getattr(self, a_d) pin_nr = int(bits[1]) if pin_nr >= len(part): raise InvalidPinDefError('Invalid pin definition: {0} at position 3 on {1}' .format(pin_def, self.name)) if getattr(part[pin_nr], 'mode', None) == UNAVAILABLE: raise InvalidPinDefError('Invalid pin definition: ' 'UNAVAILABLE pin {0} at position on {1}' .format(pin_def, self.name)) if self.taken[a_d][pin_nr]: raise PinAlreadyTakenError('{0} pin {1} is already taken on {2}' .format(a_d, bits[1], self.name)) # ok, should be available pin = part[pin_nr] self.taken[a_d][pin_nr] = True if pin.type is DIGITAL: if bits[2] == 'p': pin.mode = PWM elif bits[2] == 's': pin.mode = SERVO elif bits[2] != 'o': pin.mode = INPUT else: pin.enable_reporting() return pin
python
def get_pin(self, pin_def): if type(pin_def) == list: bits = pin_def else: bits = pin_def.split(':') a_d = bits[0] == 'a' and 'analog' or 'digital' part = getattr(self, a_d) pin_nr = int(bits[1]) if pin_nr >= len(part): raise InvalidPinDefError('Invalid pin definition: {0} at position 3 on {1}' .format(pin_def, self.name)) if getattr(part[pin_nr], 'mode', None) == UNAVAILABLE: raise InvalidPinDefError('Invalid pin definition: ' 'UNAVAILABLE pin {0} at position on {1}' .format(pin_def, self.name)) if self.taken[a_d][pin_nr]: raise PinAlreadyTakenError('{0} pin {1} is already taken on {2}' .format(a_d, bits[1], self.name)) # ok, should be available pin = part[pin_nr] self.taken[a_d][pin_nr] = True if pin.type is DIGITAL: if bits[2] == 'p': pin.mode = PWM elif bits[2] == 's': pin.mode = SERVO elif bits[2] != 'o': pin.mode = INPUT else: pin.enable_reporting() return pin
[ "def", "get_pin", "(", "self", ",", "pin_def", ")", ":", "if", "type", "(", "pin_def", ")", "==", "list", ":", "bits", "=", "pin_def", "else", ":", "bits", "=", "pin_def", ".", "split", "(", "':'", ")", "a_d", "=", "bits", "[", "0", "]", "==", ...
Returns the activated pin given by the pin definition. May raise an ``InvalidPinDefError`` or a ``PinAlreadyTakenError``. :arg pin_def: Pin definition as described below, but without the arduino name. So for example ``a:1:i``. 'a' analog pin Pin number 'i' for input 'd' digital pin Pin number 'o' for output 'p' for pwm (Pulse-width modulation) All seperated by ``:``.
[ "Returns", "the", "activated", "pin", "given", "by", "the", "pin", "definition", ".", "May", "raise", "an", "InvalidPinDefError", "or", "a", "PinAlreadyTakenError", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L196-L239
226,787
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.pass_time
def pass_time(self, t): """Non-blocking time-out for ``t`` seconds.""" cont = time.time() + t while time.time() < cont: time.sleep(0)
python
def pass_time(self, t): cont = time.time() + t while time.time() < cont: time.sleep(0)
[ "def", "pass_time", "(", "self", ",", "t", ")", ":", "cont", "=", "time", ".", "time", "(", ")", "+", "t", "while", "time", ".", "time", "(", ")", "<", "cont", ":", "time", ".", "sleep", "(", "0", ")" ]
Non-blocking time-out for ``t`` seconds.
[ "Non", "-", "blocking", "time", "-", "out", "for", "t", "seconds", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L241-L245
226,788
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.send_sysex
def send_sysex(self, sysex_cmd, data): """ Sends a SysEx msg. :arg sysex_cmd: A sysex command byte : arg data: a bytearray of 7-bit bytes of arbitrary data """ msg = bytearray([START_SYSEX, sysex_cmd]) msg.extend(data) msg.append(END_SYSEX) self.sp.write(msg)
python
def send_sysex(self, sysex_cmd, data): msg = bytearray([START_SYSEX, sysex_cmd]) msg.extend(data) msg.append(END_SYSEX) self.sp.write(msg)
[ "def", "send_sysex", "(", "self", ",", "sysex_cmd", ",", "data", ")", ":", "msg", "=", "bytearray", "(", "[", "START_SYSEX", ",", "sysex_cmd", "]", ")", "msg", ".", "extend", "(", "data", ")", "msg", ".", "append", "(", "END_SYSEX", ")", "self", ".",...
Sends a SysEx msg. :arg sysex_cmd: A sysex command byte : arg data: a bytearray of 7-bit bytes of arbitrary data
[ "Sends", "a", "SysEx", "msg", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L247-L257
226,789
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.servo_config
def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0): """ Configure a pin as servo with min_pulse, max_pulse and first angle. ``min_pulse`` and ``max_pulse`` default to the arduino defaults. """ if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE: raise IOError("Pin {0} is not a valid servo pin".format(pin)) data = bytearray([pin]) data += to_two_bytes(min_pulse) data += to_two_bytes(max_pulse) self.send_sysex(SERVO_CONFIG, data) # set pin._mode to SERVO so that it sends analog messages # don't set pin.mode as that calls this method self.digital[pin]._mode = SERVO self.digital[pin].write(angle)
python
def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0): if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE: raise IOError("Pin {0} is not a valid servo pin".format(pin)) data = bytearray([pin]) data += to_two_bytes(min_pulse) data += to_two_bytes(max_pulse) self.send_sysex(SERVO_CONFIG, data) # set pin._mode to SERVO so that it sends analog messages # don't set pin.mode as that calls this method self.digital[pin]._mode = SERVO self.digital[pin].write(angle)
[ "def", "servo_config", "(", "self", ",", "pin", ",", "min_pulse", "=", "544", ",", "max_pulse", "=", "2400", ",", "angle", "=", "0", ")", ":", "if", "pin", ">", "len", "(", "self", ".", "digital", ")", "or", "self", ".", "digital", "[", "pin", "]...
Configure a pin as servo with min_pulse, max_pulse and first angle. ``min_pulse`` and ``max_pulse`` default to the arduino defaults.
[ "Configure", "a", "pin", "as", "servo", "with", "min_pulse", "max_pulse", "and", "first", "angle", ".", "min_pulse", "and", "max_pulse", "default", "to", "the", "arduino", "defaults", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L312-L328
226,790
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.exit
def exit(self): """Call this to exit cleanly.""" # First detach all servo's, otherwise it somehow doesn't want to close... if hasattr(self, 'digital'): for pin in self.digital: if pin.mode == SERVO: pin.mode = OUTPUT if hasattr(self, 'sp'): self.sp.close()
python
def exit(self): # First detach all servo's, otherwise it somehow doesn't want to close... if hasattr(self, 'digital'): for pin in self.digital: if pin.mode == SERVO: pin.mode = OUTPUT if hasattr(self, 'sp'): self.sp.close()
[ "def", "exit", "(", "self", ")", ":", "# First detach all servo's, otherwise it somehow doesn't want to close...", "if", "hasattr", "(", "self", ",", "'digital'", ")", ":", "for", "pin", "in", "self", ".", "digital", ":", "if", "pin", ".", "mode", "==", "SERVO",...
Call this to exit cleanly.
[ "Call", "this", "to", "exit", "cleanly", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L330-L338
226,791
tino/pyFirmata
pyfirmata/pyfirmata.py
Board._handle_digital_message
def _handle_digital_message(self, port_nr, lsb, msb): """ Digital messages always go by the whole port. This means we have a bitmask which we update the port. """ mask = (msb << 7) + lsb try: self.digital_ports[port_nr]._update(mask) except IndexError: raise ValueError
python
def _handle_digital_message(self, port_nr, lsb, msb): mask = (msb << 7) + lsb try: self.digital_ports[port_nr]._update(mask) except IndexError: raise ValueError
[ "def", "_handle_digital_message", "(", "self", ",", "port_nr", ",", "lsb", ",", "msb", ")", ":", "mask", "=", "(", "msb", "<<", "7", ")", "+", "lsb", "try", ":", "self", ".", "digital_ports", "[", "port_nr", "]", ".", "_update", "(", "mask", ")", "...
Digital messages always go by the whole port. This means we have a bitmask which we update the port.
[ "Digital", "messages", "always", "go", "by", "the", "whole", "port", ".", "This", "means", "we", "have", "a", "bitmask", "which", "we", "update", "the", "port", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L350-L359
226,792
tino/pyFirmata
pyfirmata/pyfirmata.py
Port.enable_reporting
def enable_reporting(self): """Enable reporting of values for the whole port.""" self.reporting = True msg = bytearray([REPORT_DIGITAL + self.port_number, 1]) self.board.sp.write(msg) for pin in self.pins: if pin.mode == INPUT: pin.reporting = True
python
def enable_reporting(self): self.reporting = True msg = bytearray([REPORT_DIGITAL + self.port_number, 1]) self.board.sp.write(msg) for pin in self.pins: if pin.mode == INPUT: pin.reporting = True
[ "def", "enable_reporting", "(", "self", ")", ":", "self", ".", "reporting", "=", "True", "msg", "=", "bytearray", "(", "[", "REPORT_DIGITAL", "+", "self", ".", "port_number", ",", "1", "]", ")", "self", ".", "board", ".", "sp", ".", "write", "(", "ms...
Enable reporting of values for the whole port.
[ "Enable", "reporting", "of", "values", "for", "the", "whole", "port", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L402-L410
226,793
tino/pyFirmata
pyfirmata/pyfirmata.py
Port.disable_reporting
def disable_reporting(self): """Disable the reporting of the port.""" self.reporting = False msg = bytearray([REPORT_DIGITAL + self.port_number, 0]) self.board.sp.write(msg)
python
def disable_reporting(self): self.reporting = False msg = bytearray([REPORT_DIGITAL + self.port_number, 0]) self.board.sp.write(msg)
[ "def", "disable_reporting", "(", "self", ")", ":", "self", ".", "reporting", "=", "False", "msg", "=", "bytearray", "(", "[", "REPORT_DIGITAL", "+", "self", ".", "port_number", ",", "0", "]", ")", "self", ".", "board", ".", "sp", ".", "write", "(", "...
Disable the reporting of the port.
[ "Disable", "the", "reporting", "of", "the", "port", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L412-L416
226,794
tino/pyFirmata
pyfirmata/pyfirmata.py
Port.write
def write(self): """Set the output pins of the port to the correct state.""" mask = 0 for pin in self.pins: if pin.mode == OUTPUT: if pin.value == 1: pin_nr = pin.pin_number - self.port_number * 8 mask |= 1 << int(pin_nr) # print("type mask", type(mask)) # print("type self.portnumber", type(self.port_number)) # print("type pinnr", type(pin_nr)) msg = bytearray([DIGITAL_MESSAGE + self.port_number, mask % 128, mask >> 7]) self.board.sp.write(msg)
python
def write(self): mask = 0 for pin in self.pins: if pin.mode == OUTPUT: if pin.value == 1: pin_nr = pin.pin_number - self.port_number * 8 mask |= 1 << int(pin_nr) # print("type mask", type(mask)) # print("type self.portnumber", type(self.port_number)) # print("type pinnr", type(pin_nr)) msg = bytearray([DIGITAL_MESSAGE + self.port_number, mask % 128, mask >> 7]) self.board.sp.write(msg)
[ "def", "write", "(", "self", ")", ":", "mask", "=", "0", "for", "pin", "in", "self", ".", "pins", ":", "if", "pin", ".", "mode", "==", "OUTPUT", ":", "if", "pin", ".", "value", "==", "1", ":", "pin_nr", "=", "pin", ".", "pin_number", "-", "self...
Set the output pins of the port to the correct state.
[ "Set", "the", "output", "pins", "of", "the", "port", "to", "the", "correct", "state", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L418-L430
226,795
tino/pyFirmata
pyfirmata/pyfirmata.py
Port._update
def _update(self, mask): """Update the values for the pins marked as input with the mask.""" if self.reporting: for pin in self.pins: if pin.mode is INPUT: pin_nr = pin.pin_number - self.port_number * 8 pin.value = (mask & (1 << pin_nr)) > 0
python
def _update(self, mask): if self.reporting: for pin in self.pins: if pin.mode is INPUT: pin_nr = pin.pin_number - self.port_number * 8 pin.value = (mask & (1 << pin_nr)) > 0
[ "def", "_update", "(", "self", ",", "mask", ")", ":", "if", "self", ".", "reporting", ":", "for", "pin", "in", "self", ".", "pins", ":", "if", "pin", ".", "mode", "is", "INPUT", ":", "pin_nr", "=", "pin", ".", "pin_number", "-", "self", ".", "por...
Update the values for the pins marked as input with the mask.
[ "Update", "the", "values", "for", "the", "pins", "marked", "as", "input", "with", "the", "mask", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L432-L438
226,796
tino/pyFirmata
pyfirmata/pyfirmata.py
Pin.enable_reporting
def enable_reporting(self): """Set an input pin to report values.""" if self.mode is not INPUT: raise IOError("{0} is not an input and can therefore not report".format(self)) if self.type == ANALOG: self.reporting = True msg = bytearray([REPORT_ANALOG + self.pin_number, 1]) self.board.sp.write(msg) else: self.port.enable_reporting()
python
def enable_reporting(self): if self.mode is not INPUT: raise IOError("{0} is not an input and can therefore not report".format(self)) if self.type == ANALOG: self.reporting = True msg = bytearray([REPORT_ANALOG + self.pin_number, 1]) self.board.sp.write(msg) else: self.port.enable_reporting()
[ "def", "enable_reporting", "(", "self", ")", ":", "if", "self", ".", "mode", "is", "not", "INPUT", ":", "raise", "IOError", "(", "\"{0} is not an input and can therefore not report\"", ".", "format", "(", "self", ")", ")", "if", "self", ".", "type", "==", "A...
Set an input pin to report values.
[ "Set", "an", "input", "pin", "to", "report", "values", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L488-L497
226,797
tino/pyFirmata
pyfirmata/pyfirmata.py
Pin.disable_reporting
def disable_reporting(self): """Disable the reporting of an input pin.""" if self.type == ANALOG: self.reporting = False msg = bytearray([REPORT_ANALOG + self.pin_number, 0]) self.board.sp.write(msg) else: self.port.disable_reporting()
python
def disable_reporting(self): if self.type == ANALOG: self.reporting = False msg = bytearray([REPORT_ANALOG + self.pin_number, 0]) self.board.sp.write(msg) else: self.port.disable_reporting()
[ "def", "disable_reporting", "(", "self", ")", ":", "if", "self", ".", "type", "==", "ANALOG", ":", "self", ".", "reporting", "=", "False", "msg", "=", "bytearray", "(", "[", "REPORT_ANALOG", "+", "self", ".", "pin_number", ",", "0", "]", ")", "self", ...
Disable the reporting of an input pin.
[ "Disable", "the", "reporting", "of", "an", "input", "pin", "." ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L500-L507
226,798
tino/pyFirmata
pyfirmata/pyfirmata.py
Pin.write
def write(self, value): """ Output a voltage from the pin :arg value: Uses value as a boolean if the pin is in output mode, or expects a float from 0 to 1 if the pin is in PWM mode. If the pin is in SERVO the value should be in degrees. """ if self.mode is UNAVAILABLE: raise IOError("{0} can not be used through Firmata".format(self)) if self.mode is INPUT: raise IOError("{0} is set up as an INPUT and can therefore not be written to" .format(self)) if value is not self.value: self.value = value if self.mode is OUTPUT: if self.port: self.port.write() else: msg = bytearray([DIGITAL_MESSAGE, self.pin_number, value]) self.board.sp.write(msg) elif self.mode is PWM: value = int(round(value * 255)) msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7]) self.board.sp.write(msg) elif self.mode is SERVO: value = int(value) msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7]) self.board.sp.write(msg)
python
def write(self, value): if self.mode is UNAVAILABLE: raise IOError("{0} can not be used through Firmata".format(self)) if self.mode is INPUT: raise IOError("{0} is set up as an INPUT and can therefore not be written to" .format(self)) if value is not self.value: self.value = value if self.mode is OUTPUT: if self.port: self.port.write() else: msg = bytearray([DIGITAL_MESSAGE, self.pin_number, value]) self.board.sp.write(msg) elif self.mode is PWM: value = int(round(value * 255)) msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7]) self.board.sp.write(msg) elif self.mode is SERVO: value = int(value) msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7]) self.board.sp.write(msg)
[ "def", "write", "(", "self", ",", "value", ")", ":", "if", "self", ".", "mode", "is", "UNAVAILABLE", ":", "raise", "IOError", "(", "\"{0} can not be used through Firmata\"", ".", "format", "(", "self", ")", ")", "if", "self", ".", "mode", "is", "INPUT", ...
Output a voltage from the pin :arg value: Uses value as a boolean if the pin is in output mode, or expects a float from 0 to 1 if the pin is in PWM mode. If the pin is in SERVO the value should be in degrees.
[ "Output", "a", "voltage", "from", "the", "pin" ]
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L520-L549
226,799
tino/pyFirmata
pyfirmata/util.py
get_the_board
def get_the_board( layout=BOARDS["arduino"], base_dir="/dev/", identifier="tty.usbserial" ): """ Helper function to get the one and only board connected to the computer running this. It assumes a normal arduino layout, but this can be overriden by passing a different layout dict as the ``layout`` parameter. ``base_dir`` and ``identifier`` are overridable as well. It will raise an IOError if it can't find a board, on a serial, or if it finds more than one. """ from .pyfirmata import Board # prevent a circular import boards = [] for device in os.listdir(base_dir): if device.startswith(identifier): try: board = Board(os.path.join(base_dir, device), layout) except serial.SerialException: pass else: boards.append(board) if len(boards) == 0: raise IOError( "No boards found in {0} with identifier {1}".format(base_dir, identifier) ) elif len(boards) > 1: raise IOError("More than one board found!") return boards[0]
python
def get_the_board( layout=BOARDS["arduino"], base_dir="/dev/", identifier="tty.usbserial" ): from .pyfirmata import Board # prevent a circular import boards = [] for device in os.listdir(base_dir): if device.startswith(identifier): try: board = Board(os.path.join(base_dir, device), layout) except serial.SerialException: pass else: boards.append(board) if len(boards) == 0: raise IOError( "No boards found in {0} with identifier {1}".format(base_dir, identifier) ) elif len(boards) > 1: raise IOError("More than one board found!") return boards[0]
[ "def", "get_the_board", "(", "layout", "=", "BOARDS", "[", "\"arduino\"", "]", ",", "base_dir", "=", "\"/dev/\"", ",", "identifier", "=", "\"tty.usbserial\"", ")", ":", "from", ".", "pyfirmata", "import", "Board", "# prevent a circular import", "boards", "=", "[...
Helper function to get the one and only board connected to the computer running this. It assumes a normal arduino layout, but this can be overriden by passing a different layout dict as the ``layout`` parameter. ``base_dir`` and ``identifier`` are overridable as well. It will raise an IOError if it can't find a board, on a serial, or if it finds more than one.
[ "Helper", "function", "to", "get", "the", "one", "and", "only", "board", "connected", "to", "the", "computer", "running", "this", ".", "It", "assumes", "a", "normal", "arduino", "layout", "but", "this", "can", "be", "overriden", "by", "passing", "a", "diff...
05881909c4d7c4e808e9ed457144670b2136706e
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/util.py#L13-L41