body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
f1cfc2ab63c8236965c6f153a760e8e4870f29c7c0a921bfb93c6d4c028ad5c5
def has_straight_flush(self): 'Returns True if the hand has a straight flush' suited_hands = {} for card in self.cards: suited_hands.setdefault(card.suit, PokerHand()).add_card(card) for suit in suited_hands: if suited_hands[suit].has_straight(): return True return False
Returns True if the hand has a straight flush
chap18/PokerHand.py
has_straight_flush
theChad/ThinkPython
0
python
def has_straight_flush(self): suited_hands = {} for card in self.cards: suited_hands.setdefault(card.suit, PokerHand()).add_card(card) for suit in suited_hands: if suited_hands[suit].has_straight(): return True return False
def has_straight_flush(self): suited_hands = {} for card in self.cards: suited_hands.setdefault(card.suit, PokerHand()).add_card(card) for suit in suited_hands: if suited_hands[suit].has_straight(): return True return False<|docstring|>Returns True if the hand has a straight flush<|endoftext|>
f7150d5a4ca0c14f029e133df65fbc190a70a6faa1b6356cc0b40e261be6afc6
def classify(self): 'Returns the highest-value classification for a hand.' for classification in self.classifications: if self.class_tests[classification](self): self.label = classification return classification self.label = None return None
Returns the highest-value classification for a hand.
chap18/PokerHand.py
classify
theChad/ThinkPython
0
python
def classify(self): for classification in self.classifications: if self.class_tests[classification](self): self.label = classification return classification self.label = None return None
def classify(self): for classification in self.classifications: if self.class_tests[classification](self): self.label = classification return classification self.label = None return None<|docstring|>Returns the highest-value classification for a hand.<|endoftext|>
9d78a3a31dce1e5a1ef6bb0f113df54dc82cbdb2caa9665e8f883e49d6015aaa
def shuffle(X, y, seed=None): '\n Shuffles the batch from a given set of datapoints and lables.\n ' if seed: np.random.seed(seed) index = np.arange(X.shape[0]) np.random.shuffle(index) return (X[index], y[index])
Shuffles the batch from a given set of datapoints and lables.
AKDPRFramework/utils/dataops.py
shuffle
theroyakash/AKDPRFramework
2
python
def shuffle(X, y, seed=None): '\n \n ' if seed: np.random.seed(seed) index = np.arange(X.shape[0]) np.random.shuffle(index) return (X[index], y[index])
def shuffle(X, y, seed=None): '\n \n ' if seed: np.random.seed(seed) index = np.arange(X.shape[0]) np.random.shuffle(index) return (X[index], y[index])<|docstring|>Shuffles the batch from a given set of datapoints and lables.<|endoftext|>
40eab803f851a9abc1242b71e24dba9cad5f0fa884062caabb1fc95a82f534bf
def batch_iterator(X, y=None, batch_size=32): '\n Batch generator class\n\n Args:\n - X: X data\n - y: labels for each X data\n - batch_size: Batch size you want to generate. Defaults to 32\n ' number_of_samples = X.shape[0] for i in np.arange(0, number_of_samples, batch_size): (start, end) = (i, min((i + batch_size), number_of_samples)) if (y is not None): (yield (X[start:end], y[start:end])) else: (yield X[start:end])
Batch generator class Args: - X: X data - y: labels for each X data - batch_size: Batch size you want to generate. Defaults to 32
AKDPRFramework/utils/dataops.py
batch_iterator
theroyakash/AKDPRFramework
2
python
def batch_iterator(X, y=None, batch_size=32): '\n Batch generator class\n\n Args:\n - X: X data\n - y: labels for each X data\n - batch_size: Batch size you want to generate. Defaults to 32\n ' number_of_samples = X.shape[0] for i in np.arange(0, number_of_samples, batch_size): (start, end) = (i, min((i + batch_size), number_of_samples)) if (y is not None): (yield (X[start:end], y[start:end])) else: (yield X[start:end])
def batch_iterator(X, y=None, batch_size=32): '\n Batch generator class\n\n Args:\n - X: X data\n - y: labels for each X data\n - batch_size: Batch size you want to generate. Defaults to 32\n ' number_of_samples = X.shape[0] for i in np.arange(0, number_of_samples, batch_size): (start, end) = (i, min((i + batch_size), number_of_samples)) if (y is not None): (yield (X[start:end], y[start:end])) else: (yield X[start:end])<|docstring|>Batch generator class Args: - X: X data - y: labels for each X data - batch_size: Batch size you want to generate. Defaults to 32<|endoftext|>
0f29c7dba88100cd92acdad7f762cd54f325f66264838b3942a7944811fb0bb8
def to_categorical(x, n_col=None): "\n Preforms one hot encodings for the data labels\n\n Args:\n - ``X``: Numpy Array containing your data points\n - ``n_col``: Number of column for your data. If not explicitly mentioned, it's automatically calculated.\n\n Example::\n >>> import numpy as np\n >>> def to_categorical(x, n_col=None):\n >>> if not n_col:\n >>> n_col = np.amax(x) + 1\n\n >>> one_hot = np.zeros((x.shape[0], n_col))\n >>> one_hot[np.arange(x.shape[0]), x] = 1\n >>> return one_hot\n\n >>> x = np.array([2, 3, 4, 1, 2, 3])\n >>> z = to_categorical(x, 5)\n >>> print(z)\n\n >>> x = np.array([1, 2, 3, 4, 6])\n >>> z = to_categorical(x, 7)\n >>> print(z)\n " if (not n_col): n_col = (np.amax(x) + 1) one_hot = np.zeros((x.shape[0], n_col)) one_hot[(np.arange(x.shape[0]), x)] = 1 return one_hot
Preforms one hot encodings for the data labels Args: - ``X``: Numpy Array containing your data points - ``n_col``: Number of column for your data. If not explicitly mentioned, it's automatically calculated. Example:: >>> import numpy as np >>> def to_categorical(x, n_col=None): >>> if not n_col: >>> n_col = np.amax(x) + 1 >>> one_hot = np.zeros((x.shape[0], n_col)) >>> one_hot[np.arange(x.shape[0]), x] = 1 >>> return one_hot >>> x = np.array([2, 3, 4, 1, 2, 3]) >>> z = to_categorical(x, 5) >>> print(z) >>> x = np.array([1, 2, 3, 4, 6]) >>> z = to_categorical(x, 7) >>> print(z)
AKDPRFramework/utils/dataops.py
to_categorical
theroyakash/AKDPRFramework
2
python
def to_categorical(x, n_col=None): "\n Preforms one hot encodings for the data labels\n\n Args:\n - ``X``: Numpy Array containing your data points\n - ``n_col``: Number of column for your data. If not explicitly mentioned, it's automatically calculated.\n\n Example::\n >>> import numpy as np\n >>> def to_categorical(x, n_col=None):\n >>> if not n_col:\n >>> n_col = np.amax(x) + 1\n\n >>> one_hot = np.zeros((x.shape[0], n_col))\n >>> one_hot[np.arange(x.shape[0]), x] = 1\n >>> return one_hot\n\n >>> x = np.array([2, 3, 4, 1, 2, 3])\n >>> z = to_categorical(x, 5)\n >>> print(z)\n\n >>> x = np.array([1, 2, 3, 4, 6])\n >>> z = to_categorical(x, 7)\n >>> print(z)\n " if (not n_col): n_col = (np.amax(x) + 1) one_hot = np.zeros((x.shape[0], n_col)) one_hot[(np.arange(x.shape[0]), x)] = 1 return one_hot
def to_categorical(x, n_col=None): "\n Preforms one hot encodings for the data labels\n\n Args:\n - ``X``: Numpy Array containing your data points\n - ``n_col``: Number of column for your data. If not explicitly mentioned, it's automatically calculated.\n\n Example::\n >>> import numpy as np\n >>> def to_categorical(x, n_col=None):\n >>> if not n_col:\n >>> n_col = np.amax(x) + 1\n\n >>> one_hot = np.zeros((x.shape[0], n_col))\n >>> one_hot[np.arange(x.shape[0]), x] = 1\n >>> return one_hot\n\n >>> x = np.array([2, 3, 4, 1, 2, 3])\n >>> z = to_categorical(x, 5)\n >>> print(z)\n\n >>> x = np.array([1, 2, 3, 4, 6])\n >>> z = to_categorical(x, 7)\n >>> print(z)\n " if (not n_col): n_col = (np.amax(x) + 1) one_hot = np.zeros((x.shape[0], n_col)) one_hot[(np.arange(x.shape[0]), x)] = 1 return one_hot<|docstring|>Preforms one hot encodings for the data labels Args: - ``X``: Numpy Array containing your data points - ``n_col``: Number of column for your data. If not explicitly mentioned, it's automatically calculated. Example:: >>> import numpy as np >>> def to_categorical(x, n_col=None): >>> if not n_col: >>> n_col = np.amax(x) + 1 >>> one_hot = np.zeros((x.shape[0], n_col)) >>> one_hot[np.arange(x.shape[0]), x] = 1 >>> return one_hot >>> x = np.array([2, 3, 4, 1, 2, 3]) >>> z = to_categorical(x, 5) >>> print(z) >>> x = np.array([1, 2, 3, 4, 6]) >>> z = to_categorical(x, 7) >>> print(z)<|endoftext|>
33058a65955dbecd6efe0a288ce8de8e6bd7d40e42582f5f87ed6b3c6963e67a
def diag(x): '\n Vector to diagonal matrix conversion.\n ' diagonal = np.zeros((len(x), len(x))) for i in range(len(diagonal[0])): diagonal[(i, i)] = x[i] return diagonal
Vector to diagonal matrix conversion.
AKDPRFramework/utils/dataops.py
diag
theroyakash/AKDPRFramework
2
python
def diag(x): '\n \n ' diagonal = np.zeros((len(x), len(x))) for i in range(len(diagonal[0])): diagonal[(i, i)] = x[i] return diagonal
def diag(x): '\n \n ' diagonal = np.zeros((len(x), len(x))) for i in range(len(diagonal[0])): diagonal[(i, i)] = x[i] return diagonal<|docstring|>Vector to diagonal matrix conversion.<|endoftext|>
69cec0530dc9063b15f438b2aa4da00d2d06bdaa34c8731cbef648072bf866fa
def train_test_split(X, y, test_size=0.5, makeshuffle=True, seed=None): '\n Make train test split on dataset.\n\n Args:\n - ``X``: X dataset of numpy array\n - ``y``: y labels for that array\n - ``test_size``: How much percentage of data you want for your test dataset\n - ``makeshuffle``: do you want to shuffle the data before splitting?\n - ``seed``: mention a random seed for reproducing results\n\n Returns:\n ``X_train``, ``X_test``, ``y_train``, ``y_test``\n\n ' if makeshuffle: (X, y) = shuffle(X, y, seed) split_i = (len(y) - int((len(y) // (1 / test_size)))) (X_train, X_test) = (X[:split_i], X[split_i:]) (y_train, y_test) = (y[:split_i], y[split_i:]) return (X_train, X_test, y_train, y_test)
Make train test split on dataset. Args: - ``X``: X dataset of numpy array - ``y``: y labels for that array - ``test_size``: How much percentage of data you want for your test dataset - ``makeshuffle``: do you want to shuffle the data before splitting? - ``seed``: mention a random seed for reproducing results Returns: ``X_train``, ``X_test``, ``y_train``, ``y_test``
AKDPRFramework/utils/dataops.py
train_test_split
theroyakash/AKDPRFramework
2
python
def train_test_split(X, y, test_size=0.5, makeshuffle=True, seed=None): '\n Make train test split on dataset.\n\n Args:\n - ``X``: X dataset of numpy array\n - ``y``: y labels for that array\n - ``test_size``: How much percentage of data you want for your test dataset\n - ``makeshuffle``: do you want to shuffle the data before splitting?\n - ``seed``: mention a random seed for reproducing results\n\n Returns:\n ``X_train``, ``X_test``, ``y_train``, ``y_test``\n\n ' if makeshuffle: (X, y) = shuffle(X, y, seed) split_i = (len(y) - int((len(y) // (1 / test_size)))) (X_train, X_test) = (X[:split_i], X[split_i:]) (y_train, y_test) = (y[:split_i], y[split_i:]) return (X_train, X_test, y_train, y_test)
def train_test_split(X, y, test_size=0.5, makeshuffle=True, seed=None): '\n Make train test split on dataset.\n\n Args:\n - ``X``: X dataset of numpy array\n - ``y``: y labels for that array\n - ``test_size``: How much percentage of data you want for your test dataset\n - ``makeshuffle``: do you want to shuffle the data before splitting?\n - ``seed``: mention a random seed for reproducing results\n\n Returns:\n ``X_train``, ``X_test``, ``y_train``, ``y_test``\n\n ' if makeshuffle: (X, y) = shuffle(X, y, seed) split_i = (len(y) - int((len(y) // (1 / test_size)))) (X_train, X_test) = (X[:split_i], X[split_i:]) (y_train, y_test) = (y[:split_i], y[split_i:]) return (X_train, X_test, y_train, y_test)<|docstring|>Make train test split on dataset. Args: - ``X``: X dataset of numpy array - ``y``: y labels for that array - ``test_size``: How much percentage of data you want for your test dataset - ``makeshuffle``: do you want to shuffle the data before splitting? - ``seed``: mention a random seed for reproducing results Returns: ``X_train``, ``X_test``, ``y_train``, ``y_test``<|endoftext|>
baeeda7b98bd4617afabf1ab8ef18ffd48f0a2ced30187d7eb17ce619283cb74
def euclidean_distance(x1, x2): '\n Calculates the l2 distance between two vectors\n ' distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance)
Calculates the l2 distance between two vectors
AKDPRFramework/utils/dataops.py
euclidean_distance
theroyakash/AKDPRFramework
2
python
def euclidean_distance(x1, x2): '\n \n ' distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance)
def euclidean_distance(x1, x2): '\n \n ' distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance)<|docstring|>Calculates the l2 distance between two vectors<|endoftext|>
11aea283dbadcf1c96f7b89be88542ce5d308b45300a72b2fe7f09a23b51014f
def normalize(X, axis=(- 1), order=2): ' Normalize the dataset X ' l2 = np.atleast_1d(np.linalg.norm(X, order, axis)) l2[(l2 == 0)] = 1 return (X / np.expand_dims(l2, axis))
Normalize the dataset X
AKDPRFramework/utils/dataops.py
normalize
theroyakash/AKDPRFramework
2
python
def normalize(X, axis=(- 1), order=2): ' ' l2 = np.atleast_1d(np.linalg.norm(X, order, axis)) l2[(l2 == 0)] = 1 return (X / np.expand_dims(l2, axis))
def normalize(X, axis=(- 1), order=2): ' ' l2 = np.atleast_1d(np.linalg.norm(X, order, axis)) l2[(l2 == 0)] = 1 return (X / np.expand_dims(l2, axis))<|docstring|>Normalize the dataset X<|endoftext|>
7b700b3d61a85e4f85c1e2476b61a230066ff33484766fabb9bf333d4fad0617
def download_progress_hook(count, blockSize, totalSize): 'A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 1% change in download progress.\n ' global last_percent_reported percent = int((((count * blockSize) * 100) / totalSize)) if (last_percent_reported != percent): if ((percent % 5) == 0): sys.stdout.write(('%s%%' % percent)) sys.stdout.flush() else: sys.stdout.write('.') sys.stdout.flush() last_percent_reported = percent
A hook to report the progress of a download. This is mostly intended for users with slow internet connections. Reports every 1% change in download progress.
create_bbox_SVHN_dataset.py
download_progress_hook
BananuhBeatDown/digit_recognition
2
python
def download_progress_hook(count, blockSize, totalSize): 'A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 1% change in download progress.\n ' global last_percent_reported percent = int((((count * blockSize) * 100) / totalSize)) if (last_percent_reported != percent): if ((percent % 5) == 0): sys.stdout.write(('%s%%' % percent)) sys.stdout.flush() else: sys.stdout.write('.') sys.stdout.flush() last_percent_reported = percent
def download_progress_hook(count, blockSize, totalSize): 'A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 1% change in download progress.\n ' global last_percent_reported percent = int((((count * blockSize) * 100) / totalSize)) if (last_percent_reported != percent): if ((percent % 5) == 0): sys.stdout.write(('%s%%' % percent)) sys.stdout.flush() else: sys.stdout.write('.') sys.stdout.flush() last_percent_reported = percent<|docstring|>A hook to report the progress of a download. This is mostly intended for users with slow internet connections. Reports every 1% change in download progress.<|endoftext|>
0e03570a22e68c3439ae6a0796b2f4bb3a83708fcd56c5f4fb13d7ee20dc3ec6
def maybe_download(filename, force=False): "Download a file if not present, and make sure it's the right size." if (force or (not os.path.exists(filename))): print('Attempting to download:', filename) (filename, _) = urlretrieve((url + filename), filename, reporthook=download_progress_hook) print('\nDownload Complete!') else: print('Already downloaded') return filename
Download a file if not present, and make sure it's the right size.
create_bbox_SVHN_dataset.py
maybe_download
BananuhBeatDown/digit_recognition
2
python
def maybe_download(filename, force=False): if (force or (not os.path.exists(filename))): print('Attempting to download:', filename) (filename, _) = urlretrieve((url + filename), filename, reporthook=download_progress_hook) print('\nDownload Complete!') else: print('Already downloaded') return filename
def maybe_download(filename, force=False): if (force or (not os.path.exists(filename))): print('Attempting to download:', filename) (filename, _) = urlretrieve((url + filename), filename, reporthook=download_progress_hook) print('\nDownload Complete!') else: print('Already downloaded') return filename<|docstring|>Download a file if not present, and make sure it's the right size.<|endoftext|>
75e7d5242d239bceb3970bbb146d8f0a035bfeefb651349a3d619f4b94142138
def main(db_url, output_filename, debug=False): 'Implement the ``thapbi_pict conflicts`` subcommand.\n\n Looks for taxonomy conflicts at marker, genus or species level, with the\n number of marker or genus level conflicts used as the return code. i.e.\n Unix failure (non-zero) when there are marker or genus level conflicts.\n\n A marker level conflict is when a unique sequence appears in the DB under\n more than one marker name (e.g. both COI and ITS1), which is most likely\n an error in the DB construction.\n\n Genus level conflicts are where a unique sequence in the DB is reported\n from more than one genus, which is considered undesirable. Similarly for\n species level conflicts, but for some markers this is sadly common and not\n considered to be an error.\n ' if (output_filename == '-'): out_handle = sys.stdout else: out_handle = open(output_filename, 'w') Session = connect_to_db(db_url, echo=False) session = Session() cur_tax = aliased(Taxonomy) marker_seq = aliased(MarkerSeq) marker_def = aliased(MarkerDef) view = session.query(SeqSource).join(marker_seq, SeqSource.marker_seq).join(marker_def, SeqSource.marker_definition).join(cur_tax, SeqSource.taxonomy).options(contains_eager(SeqSource.marker_seq, alias=marker_seq)).options(contains_eager(SeqSource.marker_definition, alias=marker_def)).options(contains_eager(SeqSource.taxonomy, alias=cur_tax)) md5_to_seq = {} md5_to_marker = {} md5_to_genus = {} md5_to_species = {} for seq_source in view: md5 = seq_source.marker_seq.md5 seq = seq_source.marker_seq.sequence genus = seq_source.taxonomy.genus md5_to_seq[md5] = seq try: md5_to_marker[md5].add(seq_source.marker_definition.name) except KeyError: md5_to_marker[md5] = {seq_source.marker_definition.name} if genus: try: md5_to_genus[md5].add(genus) except KeyError: md5_to_genus[md5] = {genus} if seq_source.taxonomy.species: genus_species = genus_species_name(genus, seq_source.taxonomy.species) try: md5_to_species[md5].add(genus_species) except KeyError: 
md5_to_species[md5] = {genus_species} sys.stderr.write(f'''Loaded taxonomy for {len(md5_to_seq)} sequences from DB ''') marker_conflicts = 0 genus_conflicts = 0 out_handle.write('#MD5\tLevel\tConflicts\n') for (md5, markers) in sorted(md5_to_marker.items()): if (len(markers) > 1): out_handle.write(f'''{md5} marker {';'.join(sorted(markers))} ''') marker_conflicts += 1 for (md5, genus) in sorted(md5_to_genus.items()): if (len(genus) > 1): out_handle.write(f'''{md5} genus {';'.join(sorted(genus))} ''') genus_conflicts += 1 for (md5, species) in sorted(md5_to_species.items()): if (len(species) > 1): out_handle.write(f'''{md5} species {';'.join(sorted(species))} ''') if (output_filename != '-'): out_handle.close() if debug: sys.stderr.write(f'''{marker_conflicts} marker level conflicts ''') sys.stderr.write(f'''{genus_conflicts} genus level conflicts ''') return (marker_conflicts + genus_conflicts)
Implement the ``thapbi_pict conflicts`` subcommand. Looks for taxonomy conflicts at marker, genus or species level, with the number of marker or genus level conflicts used as the return code. i.e. Unix failure (non-zero) when there are marker or genus level conflicts. A marker level conflict is when a unique sequence appears in the DB under more than one marker name (e.g. both COI and ITS1), which is most likely an error in the DB construction. Genus level conflicts are where a unique sequence in the DB is reported from more than one genus, which is considered undesirable. Similarly for species level conflicts, but for some markers this is sadly common and not considered to be an error.
thapbi_pict/conflicts.py
main
HuttonICS/thapbi-pict
6
python
def main(db_url, output_filename, debug=False): 'Implement the ``thapbi_pict conflicts`` subcommand.\n\n Looks for taxonomy conflicts at marker, genus or species level, with the\n number of marker or genus level conflicts used as the return code. i.e.\n Unix failure (non-zero) when there are marker or genus level conflicts.\n\n A marker level conflict is when a unique sequence appears in the DB under\n more than one marker name (e.g. both COI and ITS1), which is most likely\n an error in the DB construction.\n\n Genus level conflicts are where a unique sequence in the DB is reported\n from more than one genus, which is considered undesirable. Similarly for\n species level conflicts, but for some markers this is sadly common and not\n considered to be an error.\n ' if (output_filename == '-'): out_handle = sys.stdout else: out_handle = open(output_filename, 'w') Session = connect_to_db(db_url, echo=False) session = Session() cur_tax = aliased(Taxonomy) marker_seq = aliased(MarkerSeq) marker_def = aliased(MarkerDef) view = session.query(SeqSource).join(marker_seq, SeqSource.marker_seq).join(marker_def, SeqSource.marker_definition).join(cur_tax, SeqSource.taxonomy).options(contains_eager(SeqSource.marker_seq, alias=marker_seq)).options(contains_eager(SeqSource.marker_definition, alias=marker_def)).options(contains_eager(SeqSource.taxonomy, alias=cur_tax)) md5_to_seq = {} md5_to_marker = {} md5_to_genus = {} md5_to_species = {} for seq_source in view: md5 = seq_source.marker_seq.md5 seq = seq_source.marker_seq.sequence genus = seq_source.taxonomy.genus md5_to_seq[md5] = seq try: md5_to_marker[md5].add(seq_source.marker_definition.name) except KeyError: md5_to_marker[md5] = {seq_source.marker_definition.name} if genus: try: md5_to_genus[md5].add(genus) except KeyError: md5_to_genus[md5] = {genus} if seq_source.taxonomy.species: genus_species = genus_species_name(genus, seq_source.taxonomy.species) try: md5_to_species[md5].add(genus_species) except KeyError: 
md5_to_species[md5] = {genus_species} sys.stderr.write(f'Loaded taxonomy for {len(md5_to_seq)} sequences from DB ') marker_conflicts = 0 genus_conflicts = 0 out_handle.write('#MD5\tLevel\tConflicts\n') for (md5, markers) in sorted(md5_to_marker.items()): if (len(markers) > 1): out_handle.write(f'{md5} marker {';'.join(sorted(markers))} ') marker_conflicts += 1 for (md5, genus) in sorted(md5_to_genus.items()): if (len(genus) > 1): out_handle.write(f'{md5} genus {';'.join(sorted(genus))} ') genus_conflicts += 1 for (md5, species) in sorted(md5_to_species.items()): if (len(species) > 1): out_handle.write(f'{md5} species {';'.join(sorted(species))} ') if (output_filename != '-'): out_handle.close() if debug: sys.stderr.write(f'{marker_conflicts} marker level conflicts ') sys.stderr.write(f'{genus_conflicts} genus level conflicts ') return (marker_conflicts + genus_conflicts)
def main(db_url, output_filename, debug=False): 'Implement the ``thapbi_pict conflicts`` subcommand.\n\n Looks for taxonomy conflicts at marker, genus or species level, with the\n number of marker or genus level conflicts used as the return code. i.e.\n Unix failure (non-zero) when there are marker or genus level conflicts.\n\n A marker level conflict is when a unique sequence appears in the DB under\n more than one marker name (e.g. both COI and ITS1), which is most likely\n an error in the DB construction.\n\n Genus level conflicts are where a unique sequence in the DB is reported\n from more than one genus, which is considered undesirable. Similarly for\n species level conflicts, but for some markers this is sadly common and not\n considered to be an error.\n ' if (output_filename == '-'): out_handle = sys.stdout else: out_handle = open(output_filename, 'w') Session = connect_to_db(db_url, echo=False) session = Session() cur_tax = aliased(Taxonomy) marker_seq = aliased(MarkerSeq) marker_def = aliased(MarkerDef) view = session.query(SeqSource).join(marker_seq, SeqSource.marker_seq).join(marker_def, SeqSource.marker_definition).join(cur_tax, SeqSource.taxonomy).options(contains_eager(SeqSource.marker_seq, alias=marker_seq)).options(contains_eager(SeqSource.marker_definition, alias=marker_def)).options(contains_eager(SeqSource.taxonomy, alias=cur_tax)) md5_to_seq = {} md5_to_marker = {} md5_to_genus = {} md5_to_species = {} for seq_source in view: md5 = seq_source.marker_seq.md5 seq = seq_source.marker_seq.sequence genus = seq_source.taxonomy.genus md5_to_seq[md5] = seq try: md5_to_marker[md5].add(seq_source.marker_definition.name) except KeyError: md5_to_marker[md5] = {seq_source.marker_definition.name} if genus: try: md5_to_genus[md5].add(genus) except KeyError: md5_to_genus[md5] = {genus} if seq_source.taxonomy.species: genus_species = genus_species_name(genus, seq_source.taxonomy.species) try: md5_to_species[md5].add(genus_species) except KeyError: 
md5_to_species[md5] = {genus_species} sys.stderr.write(f'Loaded taxonomy for {len(md5_to_seq)} sequences from DB ') marker_conflicts = 0 genus_conflicts = 0 out_handle.write('#MD5\tLevel\tConflicts\n') for (md5, markers) in sorted(md5_to_marker.items()): if (len(markers) > 1): out_handle.write(f'{md5} marker {';'.join(sorted(markers))} ') marker_conflicts += 1 for (md5, genus) in sorted(md5_to_genus.items()): if (len(genus) > 1): out_handle.write(f'{md5} genus {';'.join(sorted(genus))} ') genus_conflicts += 1 for (md5, species) in sorted(md5_to_species.items()): if (len(species) > 1): out_handle.write(f'{md5} species {';'.join(sorted(species))} ') if (output_filename != '-'): out_handle.close() if debug: sys.stderr.write(f'{marker_conflicts} marker level conflicts ') sys.stderr.write(f'{genus_conflicts} genus level conflicts ') return (marker_conflicts + genus_conflicts)<|docstring|>Implement the ``thapbi_pict conflicts`` subcommand. Looks for taxonomy conflicts at marker, genus or species level, with the number of marker or genus level conflicts used as the return code. i.e. Unix failure (non-zero) when there are marker or genus level conflicts. A marker level conflict is when a unique sequence appears in the DB under more than one marker name (e.g. both COI and ITS1), which is most likely an error in the DB construction. Genus level conflicts are where a unique sequence in the DB is reported from more than one genus, which is considered undesirable. Similarly for species level conflicts, but for some markers this is sadly common and not considered to be an error.<|endoftext|>
03dd2b5ebd2e6e38364e89c4f2e9ea3d90b7a6d797f6dafc1be73c6d6e3dbb08
def read(path): 'Read a file from a path.' with open(os.path.join(*path.split('/'))) as f: return f.read()
Read a file from a path.
setup.py
read
icemac/icemac.callonchange
1
python
def read(path): with open(os.path.join(*path.split('/'))) as f: return f.read()
def read(path): with open(os.path.join(*path.split('/'))) as f: return f.read()<|docstring|>Read a file from a path.<|endoftext|>
97faaf5f14c691edbbcbb3d579452174f9739c8659954476099766c7e1317414
def slstm(c_prev1, c_prev2, x1, x2): "S-LSTM units as an activation function.\n\n This function implements S-LSTM unit. It is an extension of LSTM unit\n applied to tree structures.\n The function is applied to binary trees. Each node has two child nodes.\n It gets four arguments, previous cell states :math:`c_1` and\n :math:`c_2`, and incoming signals :math:`x_1` and :math:`x_2`.\n\n First both input signals :math:`x_1` and :math:`x_2` are split into\n eight arrays :math:`a_1, i_1, f_1, o_1`, and :math:`a_2, i_2, f_2, o_2`.\n They have the same shape along the second axis.\n It means that :math:`x_1` and :math:`x_2` 's second axis must have 4 times\n the length of :math:`c_{1 \\text{prev}}` and :math:`c_{2 \\text{prev}}`.\n\n The split input signals are corresponding to:\n\n - :math:`a_i` : sources of cell input\n - :math:`i_i` : sources of input gate\n - :math:`f_i` : sources of forget gate\n - :math:`o_i` : sources of output gate\n\n It computes outputs as:\n\n .. math::\n\n c &= \\tanh(a_1 + a_2) \\sigma(i_1 + i_2)\n + c_{1 \\text{prev}} \\sigma(f_1)\n + c_{2 \\text{prev}} \\sigma(f_2), \\\\\n h &= \\tanh(c) \\sigma(o_1 + o_2),\n\n where :math:`\\sigma` is the elementwise sigmoid function.\n The function returns :math:`c` and :math:`h` as a tuple.\n\n Args:\n c_prev1 (~chainer.Variable): Variable that holds the previous cell\n state of the first child node. The cell state should be a zero\n array or the output of the previous call of LSTM.\n c_prev2 (~chainer.Variable): Variable that holds the previous cell\n state of the second child node.\n x1 (~chainer.Variable): Variable that holds the incoming signal from\n the first child node. It must have the second dimension four times\n of that of the cell state,\n x2 (~chainer.Variable): Variable that holds the incoming signal from\n the second child node.\n\n Returns:\n tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c`` is\n the cell state. 
``h`` indicates the outgoing signal.\n\n See detail in paper: `Long Short-Term Memory Over Tree Structures <http://arxiv.org/abs/1503.04881>`_.\n\n " return SLSTM()(c_prev1, c_prev2, x1, x2)
S-LSTM units as an activation function. This function implements S-LSTM unit. It is an extension of LSTM unit applied to tree structures. The function is applied to binary trees. Each node has two child nodes. It gets four arguments, previous cell states :math:`c_1` and :math:`c_2`, and incoming signals :math:`x_1` and :math:`x_2`. First both input signals :math:`x_1` and :math:`x_2` are split into eight arrays :math:`a_1, i_1, f_1, o_1`, and :math:`a_2, i_2, f_2, o_2`. They have the same shape along the second axis. It means that :math:`x_1` and :math:`x_2` 's second axis must have 4 times the length of :math:`c_{1 \text{prev}}` and :math:`c_{2 \text{prev}}`. The split input signals are corresponding to: - :math:`a_i` : sources of cell input - :math:`i_i` : sources of input gate - :math:`f_i` : sources of forget gate - :math:`o_i` : sources of output gate It computes outputs as: .. math:: c &= \tanh(a_1 + a_2) \sigma(i_1 + i_2) + c_{1 \text{prev}} \sigma(f_1) + c_{2 \text{prev}} \sigma(f_2), \\ h &= \tanh(c) \sigma(o_1 + o_2), where :math:`\sigma` is the elementwise sigmoid function. The function returns :math:`c` and :math:`h` as a tuple. Args: c_prev1 (~chainer.Variable): Variable that holds the previous cell state of the first child node. The cell state should be a zero array or the output of the previous call of LSTM. c_prev2 (~chainer.Variable): Variable that holds the previous cell state of the second child node. x1 (~chainer.Variable): Variable that holds the incoming signal from the first child node. It must have the second dimension four times of that of the cell state, x2 (~chainer.Variable): Variable that holds the incoming signal from the second child node. Returns: tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c`` is the cell state. ``h`` indicates the outgoing signal. See detail in paper: `Long Short-Term Memory Over Tree Structures <http://arxiv.org/abs/1503.04881>`_.
chainer/functions/activation/slstm.py
slstm
owruby/chainer
0
python
def slstm(c_prev1, c_prev2, x1, x2):
    """S-LSTM units as an activation function.

    This function implements S-LSTM unit. It is an extension of LSTM unit
    applied to tree structures.
    The function is applied to binary trees. Each node has two child nodes.
    It gets four arguments, previous cell states :math:`c_1` and
    :math:`c_2`, and incoming signals :math:`x_1` and :math:`x_2`.

    First both input signals :math:`x_1` and :math:`x_2` are split into
    eight arrays :math:`a_1, i_1, f_1, o_1`, and :math:`a_2, i_2, f_2, o_2`.
    They have the same shape along the second axis.
    It means that :math:`x_1` and :math:`x_2` 's second axis must have 4 times
    the length of :math:`c_{1 \\text{prev}}` and :math:`c_{2 \\text{prev}}`.

    The split input signals are corresponding to:

    - :math:`a_i` : sources of cell input
    - :math:`i_i` : sources of input gate
    - :math:`f_i` : sources of forget gate
    - :math:`o_i` : sources of output gate

    It computes outputs as:

    .. math::

        c &= \\tanh(a_1 + a_2) \\sigma(i_1 + i_2)
           + c_{1 \\text{prev}} \\sigma(f_1)
           + c_{2 \\text{prev}} \\sigma(f_2), \\\\
        h &= \\tanh(c) \\sigma(o_1 + o_2),

    where :math:`\\sigma` is the elementwise sigmoid function.
    The function returns :math:`c` and :math:`h` as a tuple.

    Args:
        c_prev1 (~chainer.Variable): Variable that holds the previous cell
            state of the first child node. The cell state should be a zero
            array or the output of the previous call of LSTM.
        c_prev2 (~chainer.Variable): Variable that holds the previous cell
            state of the second child node.
        x1 (~chainer.Variable): Variable that holds the incoming signal from
            the first child node. It must have the second dimension four
            times of that of the cell state.
        x2 (~chainer.Variable): Variable that holds the incoming signal from
            the second child node.

    Returns:
        tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c``
        is the cell state. ``h`` indicates the outgoing signal.

    See detail in paper: `Long Short-Term Memory Over Tree Structures
    <http://arxiv.org/abs/1503.04881>`_.

    """
    # Delegate the actual forward computation to the SLSTM function object.
    return SLSTM()(c_prev1, c_prev2, x1, x2)
0d9223ba02b17d2aa8aee5067344808254820a8fd2e1648191b83285a999afce
def __init__(self, *, context: typing.List[str], data_agreement_id: str=None, data_agreement_version: int=None, data_agreement_template_id: str=None, data_agreement_template_version: int=None, pii_controller_name: str=None, pii_controller_url: str=None, usage_purpose: str=None, usage_purpose_description: str=None, legal_basis: str=None, method_of_use: str=None, data_policy: DataAgreementDataPolicy=None, personal_data: typing.List[DataAgreementPersonalData]=None, dpia: DataAgreementDPIA=None, event: typing.List[DataAgreementEvent]=None, proof_chain: typing.List[DataAgreementProof]=None, principle_did: str=None, proof: DataAgreementProof=None, **kwargs): 'Data Agreement instance init' super().__init__(**kwargs) self.context = context self.data_agreement_id = data_agreement_id self.data_agreement_version = data_agreement_version self.data_agreement_template_id = data_agreement_template_id self.data_agreement_template_version = data_agreement_template_version self.pii_controller_name = pii_controller_name self.pii_controller_url = pii_controller_url self.usage_purpose = usage_purpose self.usage_purpose_description = usage_purpose_description self.legal_basis = legal_basis self.method_of_use = method_of_use self.data_policy = data_policy self.personal_data = personal_data self.dpia = dpia self.proof_chain = proof_chain self.principle_did = principle_did self.event = event self.proof = proof
Data Agreement instance init
mydata_did/v1_0/models/data_agreement_instance_model.py
__init__
decentralised-dataexchange/acapy-mydata-did-protocol
1
python
def __init__(self, *, context: typing.List[str], data_agreement_id: str=None, data_agreement_version: int=None, data_agreement_template_id: str=None, data_agreement_template_version: int=None, pii_controller_name: str=None, pii_controller_url: str=None, usage_purpose: str=None, usage_purpose_description: str=None, legal_basis: str=None, method_of_use: str=None, data_policy: DataAgreementDataPolicy=None, personal_data: typing.List[DataAgreementPersonalData]=None, dpia: DataAgreementDPIA=None, event: typing.List[DataAgreementEvent]=None, proof_chain: typing.List[DataAgreementProof]=None, principle_did: str=None, proof: DataAgreementProof=None, **kwargs):
    """Initialize a Data Agreement instance.

    Every keyword-only argument maps one-to-one onto an instance
    attribute of the same name; any extra ``**kwargs`` are forwarded
    unchanged to the base model initializer.
    """
    super().__init__(**kwargs)
    # Copy each field onto the instance, preserving the original
    # assignment order.
    field_values = {
        'context': context,
        'data_agreement_id': data_agreement_id,
        'data_agreement_version': data_agreement_version,
        'data_agreement_template_id': data_agreement_template_id,
        'data_agreement_template_version': data_agreement_template_version,
        'pii_controller_name': pii_controller_name,
        'pii_controller_url': pii_controller_url,
        'usage_purpose': usage_purpose,
        'usage_purpose_description': usage_purpose_description,
        'legal_basis': legal_basis,
        'method_of_use': method_of_use,
        'data_policy': data_policy,
        'personal_data': personal_data,
        'dpia': dpia,
        'proof_chain': proof_chain,
        'principle_did': principle_did,
        'event': event,
        'proof': proof,
    }
    for field_name, field_value in field_values.items():
        setattr(self, field_name, field_value)
5755806fb17c30fcd6d4de9f25e1bb4f21dc3556da61a996746b8761a896d550
def initialize_streamlit() -> None: 'クラスを定義する前にweb上で画面を出しておく\n 状態量として, 試合数, 経験値, レベル, 連勝数を定義し, 初期化しておく(マジックコマンド的な)\n : rtype : None\n : return : なし\n ' (st.session_state.col1, st.session_state.col2) = st.columns([4, 1]) (st.session_state.col4, st.session_state.space, st.session_state.col6) = st.columns([7, 1, 4]) st.session_state.col1.title('Welcome to Hit&Blow Game!16進数5桁の数字を当てよう!') st.session_state.col1.subheader('対戦すると経験値がもらえるよ. 経験値は当てた回数や連勝数に応じて増えるぞ!') st.session_state.col1.subheader('経験値が貯まるとレベルアップだ!いずれはキャラが進化するかも‥?') if ('game_count' not in st.session_state): st.session_state.game_count = 0 if ('exp' not in st.session_state): st.session_state.exp = 0 if ('level' not in st.session_state): st.session_state.level = 1 if ('win_in_a_row' not in st.session_state): st.session_state.win_in_a_row = 0 name = st.session_state.col4.selectbox('キャラクターを選んでね', ['ジャック', 'クリス', 'フローラ', 'ドロシー']) st.session_state.chara_name = name pygame.mixer.init() sound_waiting = pygame.mixer.Sound((('voice/' + st.session_state.chara_name) + '/waiting.wav')) sound_waiting.play() pic_url1 = (('picture/' + name) + '-1.jpg') pic_url2 = (('picture/' + name) + '-2.jpg') if (st.session_state.level < 20): image = Image.open(pic_url1) st.session_state.col4.image(image) else: image = Image.open(pic_url2) st.session_state.col4.image(image)
クラスを定義する前にweb上で画面を出しておく 状態量として, 試合数, 経験値, レベル, 連勝数を定義し, 初期化しておく(マジックコマンド的な) : rtype : None : return : なし
hitblow/class_play_game.py
initialize_streamlit
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def initialize_streamlit() -> None:
    """Render the initial screen on the web before the game class is defined.

    Defines and initializes the persistent session state — game count,
    experience points, level, and win streak — if not already present
    (acts like a magic-command style one-time setup).

    :rtype: None
    :return: None
    """
    # Page layout: a wide title column plus a narrow side column, and a
    # second row with character area / spacer / info area.
    (st.session_state.col1, st.session_state.col2) = st.columns([4, 1])
    (st.session_state.col4, st.session_state.space, st.session_state.col6) = st.columns([7, 1, 4])
    st.session_state.col1.title('Welcome to Hit&Blow Game!16進数5桁の数字を当てよう!')
    st.session_state.col1.subheader('対戦すると経験値がもらえるよ. 経験値は当てた回数や連勝数に応じて増えるぞ!')
    st.session_state.col1.subheader('経験値が貯まるとレベルアップだ!いずれはキャラが進化するかも‥?')
    # Initialize persistent counters only on first load; Streamlit reruns
    # this script on every interaction, so guard each assignment.
    if ('game_count' not in st.session_state):
        st.session_state.game_count = 0
    if ('exp' not in st.session_state):
        st.session_state.exp = 0
    if ('level' not in st.session_state):
        st.session_state.level = 1
    if ('win_in_a_row' not in st.session_state):
        st.session_state.win_in_a_row = 0
    # Character selection; the chosen name drives both voice and image paths.
    name = st.session_state.col4.selectbox('キャラクターを選んでね', ['ジャック', 'クリス', 'フローラ', 'ドロシー'])
    st.session_state.chara_name = name
    # Play the character's waiting voice line.
    pygame.mixer.init()
    sound_waiting = pygame.mixer.Sound((('voice/' + st.session_state.chara_name) + '/waiting.wav'))
    sound_waiting.play()
    pic_url1 = (('picture/' + name) + '-1.jpg')
    pic_url2 = (('picture/' + name) + '-2.jpg')
    # Below level 20 show the base form; from level 20 the evolved form.
    if (st.session_state.level < 20):
        image = Image.open(pic_url1)
        st.session_state.col4.image(image)
    else:
        image = Image.open(pic_url2)
        st.session_state.col4.image(image)
83d7263c77eb4a56d709af491e586d64596c2528c4ddb6e36c94026f2e04a102
def __init__(self, ans=None, room_id=6000) -> None: 'コンストラクタ\n :param str ans : 自分の答え(相手に当ててもらう数字)\n :param str room_id : room id(6000~6999)\n : rtype : None\n : return : なし\n ' self.digits = 5 self.Tuple_16 = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f') if (ans is not None): self.ans = ans else: self.ans = self._define_hidden_number_random() self.url = 'https://damp-earth-70561.herokuapp.com' self.room_id = str(room_id) self.room_state = 1 self.player_id_F = 'e6e4dcbe-ec3c-4c2a-b228-67d1acee3c81' self.player_id_F2 = '19dfceb3-46be-4d0e-94e2-3aa3333a4442' self.player_name = 'F' self.opponent_name = None self.now_player = None self.headers = {'content-Type': 'application/json'} self.list_num_place = [] self.list_possible_ans_combination = [] self.list_ans_combination = [] self.list_possible_ans = [] self.num = None self.hit = None self.blow = None self.my_history = None self.opponent_history = None self.count = 0 self.winner = None self.volume = 0.3 self.remaining_exp_level = 0
コンストラクタ :param str ans : 自分の答え(相手に当ててもらう数字) :param str room_id : room id(6000~6999) : rtype : None : return : なし
hitblow/class_play_game.py
__init__
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def __init__(self, ans=None, room_id=6000) -> None:
    """Set up a new game session.

    :param str ans: own secret answer for the opponent to guess; a random
        one is generated when omitted
    :param int room_id: room id (6000-6999)
    :rtype: None
    :return: None
    """
    # Game constants: answer length and the hexadecimal alphabet.
    self.digits = 5
    self.Tuple_16 = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f')
    self.ans = ans if ans is not None else self._define_hidden_number_random()
    # Server / player identification.
    self.url = 'https://damp-earth-70561.herokuapp.com'
    self.room_id = str(room_id)
    self.room_state = 1
    self.player_id_F = 'e6e4dcbe-ec3c-4c2a-b228-67d1acee3c81'
    self.player_id_F2 = '19dfceb3-46be-4d0e-94e2-3aa3333a4442'
    self.player_name = 'F'
    self.opponent_name = None
    self.now_player = None
    self.headers = {'content-Type': 'application/json'}
    # Search-state containers for the guessing algorithm.
    self.list_num_place = []
    self.list_possible_ans_combination = []
    self.list_ans_combination = []
    self.list_possible_ans = []
    # Per-turn state and bookkeeping.
    self.num = None
    self.hit = None
    self.blow = None
    self.my_history = None
    self.opponent_history = None
    self.count = 0
    self.winner = None
    self.volume = 0.3
    self.remaining_exp_level = 0
24fc15df0ec479cc34e8d881e1d3ec28e7dd632323ed8feb3f5d7490ae5f37b7
def _define_hidden_number_random(self) -> str: '相手に当ててもらう答えをつくる\n : rtype : str\n : return : ans\n ' ans_list = random.sample(self.Tuple_16, self.digits) ans = ''.join(ans_list) return ans
相手に当ててもらう答えをつくる : rtype : str : return : ans
hitblow/class_play_game.py
_define_hidden_number_random
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _define_hidden_number_random(self) -> str:
    """Create the secret answer the opponent has to guess.

    Draws ``self.digits`` distinct characters from the hexadecimal
    alphabet ``self.Tuple_16`` (sampling without replacement, so all
    digits of the answer are unique).

    :rtype: str
    :return: the generated answer string
    """
    ans_list = random.sample(self.Tuple_16, self.digits)
    # Bug fix: the empty-string separator was missing (`.join` alone is a
    # syntax error); join the sampled characters into one string.
    ans = ''.join(ans_list)
    return ans
3ae72f52a957b7f3fbbf8aa498b80d4a7f04bd634764fab484d70f7ccd58078a
def _play_song(self, num: int, title) -> None: '音楽再生\n :param int num:再生回数(-1で無限ループ,これを使って止めたいときにstopするのが良いかと)\n :param int playtime:再生時間(基本-1で無限ループしてるので、使わない.デフォルト値Noneで良い)\n :param str title:bgmフォルダ内にあるmp3ファイルを再生\n : rtype : None\n : return : なし\n ' pygame.mixer.init() pygame.mixer.music.load(title) pygame.mixer.music.set_volume(self.volume) pygame.mixer.music.play(num)
音楽再生 :param int num:再生回数(-1で無限ループ,これを使って止めたいときにstopするのが良いかと) :param int playtime:再生時間(基本-1で無限ループしてるので、使わない.デフォルト値Noneで良い) :param str title:bgmフォルダ内にあるmp3ファイルを再生 : rtype : None : return : なし
hitblow/class_play_game.py
_play_song
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _play_song(self, num: int, title) -> None:
    """Play a music file via pygame's music mixer.

    :param int num: number of plays (-1 loops forever; a looping track is
        stopped later with ``_music_stop``)
    :param str title: path of the audio file (inside the bgm folder) to play
    :rtype: None
    :return: None
    """
    pygame.mixer.init()
    pygame.mixer.music.load(title)
    # Volume comes from the instance-wide setting (0.0-1.0).
    pygame.mixer.music.set_volume(self.volume)
    pygame.mixer.music.play(num)
45cdd2ebb208b571ab3d84e191651ce1b7abc1346a48e97daa2a7742e3437725
def _music_stop(self) -> None: '再生中の音楽停止\n : rtype : None\n : return : なし\n ' pygame.mixer.music.stop()
再生中の音楽停止 : rtype : None : return : なし
hitblow/class_play_game.py
_music_stop
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _music_stop(self) -> None:
    """Stop whatever music is currently playing.

    :rtype: None
    :return: None
    """
    pygame.mixer.music.stop()
1c66b85d19ea0f92ec3dbfc05f0735021cbc2b5e9e2e8849b2005f8412439033
def _get_table_by_API(self) -> None: 'APIを用いてサーバーから部屋の状態,ターン,履歴を取得(ループで何回も使用)\n : rtype : None\n : return : なし\n ' url_get_table = (((((self.url + '/rooms/') + str(self.room_id)) + '/players/') + self.player_name) + '/table') result = session.get(url_get_table) data = result.json() self.room_state = data['state'] self.now_player = data['now_player'] self.my_history = data['table']
APIを用いてサーバーから部屋の状態,ターン,履歴を取得(ループで何回も使用) : rtype : None : return : なし
hitblow/class_play_game.py
_get_table_by_API
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _get_table_by_API(self) -> None:
    """Fetch room state, current player and guess history from the server.

    Updates ``self.room_state``, ``self.now_player`` and
    ``self.my_history`` in place; called repeatedly inside the game loops.

    :rtype: None
    :return: None
    """
    endpoint = f'{self.url}/rooms/{self.room_id}/players/{self.player_name}/table'
    payload = session.get(endpoint).json()
    self.room_state = payload['state']
    self.now_player = payload['now_player']
    self.my_history = payload['table']
d745e45ce7ab683d8300bfcbe9eb45d36152d8e11eeb9a1604b360587d792446
def _post_guess_by_API(self) -> None: 'APIを用いてサーバーに予想した相手の数字(self.num)をポスト\n : rtype : None\n : return : なし\n ' url_post_guess = (((((self.url + '/rooms/') + str(self.room_id)) + '/players/') + self.player_name) + '/table/guesses') post_data = {'player_id': self.player_id_F, 'guess': self.num} session.post(url_post_guess, headers=self.headers, json=post_data)
APIを用いてサーバーに予想した相手の数字(self.num)をポスト : rtype : None : return : なし
hitblow/class_play_game.py
_post_guess_by_API
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _post_guess_by_API(self) -> None:
    """Post the guessed number (``self.num``) to the server.

    :rtype: None
    :return: None
    """
    endpoint = f'{self.url}/rooms/{self.room_id}/players/{self.player_name}/table/guesses'
    body = {'player_id': self.player_id_F, 'guess': self.num}
    session.post(endpoint, headers=self.headers, json=body)
d38127081941af686e1e8ef90146141ae19c129b4d66bd16547d939349563b14
def _enterroom_and_registerplayer(self) -> None: '部屋を作成し, 部屋に入り, 相手が来るまで待機\n 3秒ごとに相手が来ているか確認して,相手が来たらゲームスタート\n : rtype : None\n : return : なし\n ' url_enter_room = (self.url + '/rooms') post_data = {'player_id': self.player_id_F, 'room_id': self.room_id} session.post(url_enter_room, headers=self.headers, json=post_data) while (self.room_state == 1): url_get_room = ((self.url + '/rooms/') + str(self.room_id)) result = session.get(url_get_room) data = result.json() self.room_state = data['state'] time.sleep(3) self._music_stop() self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/game_start.wav')) self._play_song(num=1, title='bgm/game_start.wav') time.sleep(3) self._play_song(num=(- 1), title='bgm/Battle.wav') self.opponent_name = (data['player2'] if (data['player1'] == 'F') else data['player1']) self.now_player = data['player1']
部屋を作成し, 部屋に入り, 相手が来るまで待機 3秒ごとに相手が来ているか確認して,相手が来たらゲームスタート : rtype : None : return : なし
hitblow/class_play_game.py
_enterroom_and_registerplayer
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _enterroom_and_registerplayer(self) -> None:
    """Create/enter the room and wait until the opponent arrives.

    Polls the room every 3 seconds; once the opponent joins
    (state != 1), stops the waiting music, plays the start jingle and
    battle BGM, and records the opponent's name and who moves first.

    :rtype: None
    :return: None
    """
    url_enter_room = (self.url + '/rooms')
    post_data = {'player_id': self.player_id_F, 'room_id': self.room_id}
    session.post(url_enter_room, headers=self.headers, json=post_data)
    # state == 1 means "waiting for an opponent"; poll until it changes.
    while (self.room_state == 1):
        url_get_room = ((self.url + '/rooms/') + str(self.room_id))
        result = session.get(url_get_room)
        data = result.json()
        self.room_state = data['state']
        time.sleep(3)
    self._music_stop()
    # NOTE(review): ``_sound_play`` is not among the methods visible here
    # (only ``_play_song`` is defined) — confirm it exists elsewhere in
    # the class, otherwise this raises AttributeError at runtime.
    self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/game_start.wav'))
    self._play_song(num=1, title='bgm/game_start.wav')
    time.sleep(3)
    self._play_song(num=(- 1), title='bgm/Battle.wav')
    # The player that is not us is the opponent; player1 always moves first.
    self.opponent_name = (data['player2'] if (data['player1'] == 'F') else data['player1'])
    self.now_player = data['player1']
d2c260e4a7e8c15adc208fab88604f07257d156822ba15bee91a7e78f7338cc4
def _post_hidden_number(self) -> None: 'APIを用いてサーバーに自分の答えをポスト(初回のみ)\n : rtype : None\n : return : なし\n ' url_post_hidden_number = (((((self.url + '/rooms/') + self.room_id) + '/players/') + self.player_name) + '/hidden') post_data = {'player_id': self.player_id_F, 'hidden_number': self.ans} session.post(url_post_hidden_number, headers=self.headers, json=post_data)
APIを用いてサーバーに自分の答えをポスト(初回のみ) : rtype : None : return : なし
hitblow/class_play_game.py
_post_hidden_number
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _post_hidden_number(self) -> None:
    """Post our own secret answer to the server (done once, at game start).

    :rtype: None
    :return: None
    """
    endpoint = f'{self.url}/rooms/{self.room_id}/players/{self.player_name}/hidden'
    body = {'player_id': self.player_id_F, 'hidden_number': self.ans}
    session.post(endpoint, headers=self.headers, json=body)
b8360dd7d84737154eac2f2ecf1c9b96abd3a1aa297029ca1cfda192e14eb1df
def _first_3_times(self) -> None: '進化前,最初に行う, 答えとなる数字がどのグループに何個あるのか特定\n 1秒ごとにget_tableで状態を確認し,\n 対戦続行中で,自分のターンのとき, 1,2,3回目に01234,56789,abcdeを選んでself.numに格納\n post_guessし, 帰ってきたhit,blowの和をlist_ans_numに格納\n remove_impossible_combinationを行う\n 自分のターンで無かったら, 1秒待機\n 試合終了だったらループを抜ける\n : rtype : None\n : return : なし\n ' search_list = ['01234', '56789', 'abcde'] while True: self._get_table_by_API() if ((self.room_state == 2) and (self.now_player == self.player_name) and (self.count != 3)): print('{}回目の入力です.'.format((self.count + 1))) self.num = search_list[self.count] self.count += 1 self._post_guess_by_API() self._get_table_by_API() self.hit = self.my_history[(- 1)]['hit'] self.blow = self.my_history[(- 1)]['blow'] self.list_num_place.append((self.hit + self.blow)) print('-----', self.num) print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow)) if ((self.count == 3) or (self.room_state == 3)): break else: time.sleep(0.5) continue
進化前,最初に行う, 答えとなる数字がどのグループに何個あるのか特定 1秒ごとにget_tableで状態を確認し, 対戦続行中で,自分のターンのとき, 1,2,3回目に01234,56789,abcdeを選んでself.numに格納 post_guessし, 帰ってきたhit,blowの和をlist_ans_numに格納 remove_impossible_combinationを行う 自分のターンで無かったら, 1秒待機 試合終了だったらループを抜ける : rtype : None : return : なし
hitblow/class_play_game.py
_first_3_times
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _first_3_times(self) -> None:
    """Opening probe: find how many answer digits fall in each hex group.

    Polls the table roughly every 0.5 s. While the match is running
    (room_state == 2) and it is our turn, the 1st/2nd/3rd guesses post
    '01234', '56789' and 'abcde' in order, and the hit+blow sum of each
    reply is appended to ``self.list_num_place``. When it is not our
    turn, waits and polls again. Breaks out after the third probe or
    once the match has ended (room_state == 3).

    :rtype: None
    :return: None
    """
    search_list = ['01234', '56789', 'abcde']
    while True:
        self._get_table_by_API()
        if ((self.room_state == 2) and (self.now_player == self.player_name) and (self.count != 3)):
            print('{}回目の入力です.'.format((self.count + 1)))
            self.num = search_list[self.count]
            self.count += 1
            self._post_guess_by_API()
            self._get_table_by_API()
            # hit + blow of a 5-distinct-digit probe equals how many of
            # the probe's digits appear anywhere in the answer.
            self.hit = self.my_history[(- 1)]['hit']
            self.blow = self.my_history[(- 1)]['blow']
            self.list_num_place.append((self.hit + self.blow))
            print('-----', self.num)
            print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow))
        if ((self.count == 3) or (self.room_state == 3)):
            break
        else:
            time.sleep(0.5)
            continue
b9c77488146db01f7674e27d689dcccdfff9b9e775a0d534f35c160841441a26
def _make_list_possible_ans_combination_3(self) -> None:
    """Enumerate every possible *combination* of answer digits (pre-evolution).

    Uses the per-group counts collected by the first three probes
    (``self.list_num_place``) to generate all candidate digit sets, and
    appends each as a string to ``self.list_possible_ans_combination``.
    The remainder (digits - probed total) must come from 'f'.
    """
    low_count = self.list_num_place[0]
    mid_count = self.list_num_place[1]
    high_count = self.list_num_place[2]
    rest_count = self.digits - sum(self.list_num_place)
    group_picks = itertools.product(
        itertools.combinations('01234', low_count),
        itertools.combinations('56789', mid_count),
        itertools.combinations('abcde', high_count),
        itertools.combinations('f', rest_count),
    )
    for picks in group_picks:
        candidate = ''.join(itertools.chain.from_iterable(picks))
        self.list_possible_ans_combination.append(candidate)
進化前,2番目に行う 最初の3回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し, list_possible_ans_combinationに格納 : rtype : None : return : なし
hitblow/class_play_game.py
_make_list_possible_ans_combination_3
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _make_list_possible_ans_combination_3(self) -> None: '進化前,2番目に行う\n 最初の3回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し,\n list_possible_ans_combinationに格納\n : rtype : None\n : return : なし\n ' for i in itertools.combinations('01234', self.list_num_place[0]): for j in itertools.combinations('56789', self.list_num_place[1]): for k in itertools.combinations('abcde', self.list_num_place[2]): for l in itertools.combinations('f', (self.digits - sum(self.list_num_place))): n = .join((((i + j) + k) + l)) self.list_possible_ans_combination.append(n)
def _make_list_possible_ans_combination_3(self) -> None: '進化前,2番目に行う\n 最初の3回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し,\n list_possible_ans_combinationに格納\n : rtype : None\n : return : なし\n ' for i in itertools.combinations('01234', self.list_num_place[0]): for j in itertools.combinations('56789', self.list_num_place[1]): for k in itertools.combinations('abcde', self.list_num_place[2]): for l in itertools.combinations('f', (self.digits - sum(self.list_num_place))): n = .join((((i + j) + k) + l)) self.list_possible_ans_combination.append(n)<|docstring|>進化前,2番目に行う 最初の3回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し, list_possible_ans_combinationに格納 : rtype : None : return : なし<|endoftext|>
bdac3ca3977bcb4035c188b0cdba8f3cd520fb9f9d14b368f30832a205a9935f
def _first_2_times(self) -> None:
    """Opening probes (post-evolution): find how many answer digits sit in each group.

    Same idea as the three-probe opener, but only '01234' and '56789'
    are guessed; each reply's hit+blow total is appended to
    ``self.list_num_place``.  Sleeps 0.5 s between polls; stops after the
    second probe or when the match ends (room_state == 3).
    """
    probes = ['01234', '56789']
    while True:
        self._get_table_by_API()
        our_turn = (self.room_state == 2
                    and self.now_player == self.player_name)
        if our_turn and self.count != 2:
            print('{}回目の入力です.'.format(self.count + 1))
            self.num = probes[self.count]
            self.count += 1
            self._post_guess_by_API()
            self._get_table_by_API()
            last_guess = self.my_history[-1]
            self.hit = last_guess['hit']
            self.blow = last_guess['blow']
            self.list_num_place.append(self.hit + self.blow)
            print('-----', self.num)
            print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow))
        if self.count == 2 or self.room_state == 3:
            break
        time.sleep(0.5)
進化後, 最初に行う, 答えとなる数字がどのグループに何個あるのか特定 1秒ごとにget_tableで状態を確認し, 対戦続行中で,自分のターンのとき, 1,2回目に01234,56789を選んでself.numに格納 post_guessし, 帰ってきたhit,blowの和をlist_ans_numに格納 remove_impossible_combinationを行う 自分のターンで無かったら, 1秒待機 試合終了だったらループを抜ける : rtype : None : return : なし
hitblow/class_play_game.py
_first_2_times
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _first_2_times(self) -> None: '進化後, 最初に行う, 答えとなる数字がどのグループに何個あるのか特定\n 1秒ごとにget_tableで状態を確認し,\n 対戦続行中で,自分のターンのとき, 1,2回目に01234,56789を選んでself.numに格納\n post_guessし, 帰ってきたhit,blowの和をlist_ans_numに格納\n remove_impossible_combinationを行う\n 自分のターンで無かったら, 1秒待機\n 試合終了だったらループを抜ける\n : rtype : None\n : return : なし\n ' search_list = ['01234', '56789'] while True: self._get_table_by_API() if ((self.room_state == 2) and (self.now_player == self.player_name) and (self.count != 2)): print('{}回目の入力です.'.format((self.count + 1))) self.num = search_list[self.count] self.count += 1 self._post_guess_by_API() self._get_table_by_API() self.hit = self.my_history[(- 1)]['hit'] self.blow = self.my_history[(- 1)]['blow'] self.list_num_place.append((self.hit + self.blow)) print('-----', self.num) print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow)) if ((self.count == 2) or (self.room_state == 3)): break else: time.sleep(0.5) continue
def _first_2_times(self) -> None: '進化後, 最初に行う, 答えとなる数字がどのグループに何個あるのか特定\n 1秒ごとにget_tableで状態を確認し,\n 対戦続行中で,自分のターンのとき, 1,2回目に01234,56789を選んでself.numに格納\n post_guessし, 帰ってきたhit,blowの和をlist_ans_numに格納\n remove_impossible_combinationを行う\n 自分のターンで無かったら, 1秒待機\n 試合終了だったらループを抜ける\n : rtype : None\n : return : なし\n ' search_list = ['01234', '56789'] while True: self._get_table_by_API() if ((self.room_state == 2) and (self.now_player == self.player_name) and (self.count != 2)): print('{}回目の入力です.'.format((self.count + 1))) self.num = search_list[self.count] self.count += 1 self._post_guess_by_API() self._get_table_by_API() self.hit = self.my_history[(- 1)]['hit'] self.blow = self.my_history[(- 1)]['blow'] self.list_num_place.append((self.hit + self.blow)) print('-----', self.num) print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow)) if ((self.count == 2) or (self.room_state == 3)): break else: time.sleep(0.5) continue<|docstring|>進化後, 最初に行う, 答えとなる数字がどのグループに何個あるのか特定 1秒ごとにget_tableで状態を確認し, 対戦続行中で,自分のターンのとき, 1,2回目に01234,56789を選んでself.numに格納 post_guessし, 帰ってきたhit,blowの和をlist_ans_numに格納 remove_impossible_combinationを行う 自分のターンで無かったら, 1秒待機 試合終了だったらループを抜ける : rtype : None : return : なし<|endoftext|>
a95e91e504d6fe5d23f862f5cb984a44b8ccf5bdb5b1e71370a847ab7e54c3c0
def _make_list_possible_ans_combination(self) -> None:
    """Enumerate every possible *combination* of answer digits (post-evolution).

    Uses the per-group counts from the two probes
    (``self.list_num_place``) to build all candidate digit sets, drawing
    the remaining digits from 'abcdef', and appends each as a string to
    ``self.list_possible_ans_combination``.
    """
    low_count = self.list_num_place[0]
    mid_count = self.list_num_place[1]
    rest_count = self.digits - sum(self.list_num_place)
    group_picks = itertools.product(
        itertools.combinations('01234', low_count),
        itertools.combinations('56789', mid_count),
        itertools.combinations('abcdef', rest_count),
    )
    for picks in group_picks:
        candidate = ''.join(itertools.chain.from_iterable(picks))
        self.list_possible_ans_combination.append(candidate)
進化後, 2番目に行う 最初の2回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し, list_possible_ans_combinationに格納 : rtype : None : return : なし
hitblow/class_play_game.py
_make_list_possible_ans_combination
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _make_list_possible_ans_combination(self) -> None: '進化後, 2番目に行う\n 最初の2回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し,\n list_possible_ans_combinationに格納\n : rtype : None\n : return : なし\n ' for i in itertools.combinations('01234', self.list_num_place[0]): for j in itertools.combinations('56789', self.list_num_place[1]): for k in itertools.combinations('abcdef', (self.digits - sum(self.list_num_place))): n = .join(((i + j) + k)) self.list_possible_ans_combination.append(n)
def _make_list_possible_ans_combination(self) -> None: '進化後, 2番目に行う\n 最初の2回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し,\n list_possible_ans_combinationに格納\n : rtype : None\n : return : なし\n ' for i in itertools.combinations('01234', self.list_num_place[0]): for j in itertools.combinations('56789', self.list_num_place[1]): for k in itertools.combinations('abcdef', (self.digits - sum(self.list_num_place))): n = .join(((i + j) + k)) self.list_possible_ans_combination.append(n)<|docstring|>進化後, 2番目に行う 最初の2回で作ったlist_ans_numから, 答えの5数字の"組み合わせ"の候補を全て洗い出し, list_possible_ans_combinationに格納 : rtype : None : return : なし<|endoftext|>
484054008aa1bf5d0dd725eee40696c2665b00dd1076d6628276ab22e0f10f61
def _remove_impossible_combination(self) -> None:
    """Prune combination candidates inconsistent with the latest reply.

    A candidate can still be the answer only if guessing ``self.num``
    against it would produce the same hit+blow total that the server
    just returned; every other candidate is dropped from
    ``self.list_possible_ans_combination`` (mutated in place).

    NOTE(review): ``_check_hit_blow`` overwrites ``self.hit``/``self.blow``
    as a side effect, so after this call they describe the last candidate
    tested, not the server reply — same as the original behavior.
    """
    observed_total = self.hit + self.blow
    survivors = []
    for candidate in self.list_possible_ans_combination:
        self._check_hit_blow(self.num, candidate)
        if self.hit + self.blow == observed_total:
            survivors.append(candidate)
    self.list_possible_ans_combination[:] = survivors
3番目で使用 そのターンに質問で帰ってきたhit,blowの値を保存し, list_possible_ans_combinationの解の候補のiのうち self.numとiでhit,blowの和が一致しないものは答えの"組み合わせ"の候補としてありえないので候補から削除 こうしてlist_possible_ans_combinationの中身を削っていく : rtype : None : return : なし
hitblow/class_play_game.py
_remove_impossible_combination
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _remove_impossible_combination(self) -> None: '3番目で使用\n そのターンに質問で帰ってきたhit,blowの値を保存し, list_possible_ans_combinationの解の候補のiのうち\n self.numとiでhit,blowの和が一致しないものは答えの"組み合わせ"の候補としてありえないので候補から削除\n こうしてlist_possible_ans_combinationの中身を削っていく\n : rtype : None\n : return : なし\n ' hb = (self.hit + self.blow) for i in self.list_possible_ans_combination[:]: self._check_hit_blow(self.num, i) if ((self.hit + self.blow) != hb): self.list_possible_ans_combination.remove(i)
def _remove_impossible_combination(self) -> None: '3番目で使用\n そのターンに質問で帰ってきたhit,blowの値を保存し, list_possible_ans_combinationの解の候補のiのうち\n self.numとiでhit,blowの和が一致しないものは答えの"組み合わせ"の候補としてありえないので候補から削除\n こうしてlist_possible_ans_combinationの中身を削っていく\n : rtype : None\n : return : なし\n ' hb = (self.hit + self.blow) for i in self.list_possible_ans_combination[:]: self._check_hit_blow(self.num, i) if ((self.hit + self.blow) != hb): self.list_possible_ans_combination.remove(i)<|docstring|>3番目で使用 そのターンに質問で帰ってきたhit,blowの値を保存し, list_possible_ans_combinationの解の候補のiのうち self.numとiでhit,blowの和が一致しないものは答えの"組み合わせ"の候補としてありえないので候補から削除 こうしてlist_possible_ans_combinationの中身を削っていく : rtype : None : return : なし<|endoftext|>
da7388c30dae4c41565504d1ab32d6344fb25ec12fc71cdf215062fbab70a91f
def _remove_impossible_permutation(self) -> None:
    """Prune permutation candidates inconsistent with the latest reply.

    A candidate survives only if guessing ``self.num`` against it would
    produce exactly the hit count the server just returned; everything
    else is dropped from ``self.list_possible_ans`` (mutated in place).

    NOTE(review): ``_check_hit_blow`` overwrites ``self.hit``/``self.blow``
    as a side effect, so afterwards they describe the last candidate
    tested — same as the original behavior.
    """
    observed_hits = self.hit
    survivors = []
    for candidate in self.list_possible_ans:
        self._check_hit_blow(self.num, candidate)
        if self.hit == observed_hits:
            survivors.append(candidate)
    self.list_possible_ans[:] = survivors
3番目で使用 そのターンに質問で帰ってきたhitの値を保存し, list_possible_ansの解の候補のjのうち self.numとjでhitが一致しないものは答えの"順列"の候補としてありえないので候補から削除 こうしてlist_possible_ansの中身を削っていく : rtype : None : return : なし
hitblow/class_play_game.py
_remove_impossible_permutation
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _remove_impossible_permutation(self) -> None: '3番目で使用\n そのターンに質問で帰ってきたhitの値を保存し, list_possible_ansの解の候補のjのうち\n self.numとjでhitが一致しないものは答えの"順列"の候補としてありえないので候補から削除\n こうしてlist_possible_ansの中身を削っていく\n : rtype : None\n : return : なし\n ' hit = self.hit for i in self.list_possible_ans[:]: self._check_hit_blow(self.num, i) if (self.hit != hit): self.list_possible_ans.remove(i)
def _remove_impossible_permutation(self) -> None: '3番目で使用\n そのターンに質問で帰ってきたhitの値を保存し, list_possible_ansの解の候補のjのうち\n self.numとjでhitが一致しないものは答えの"順列"の候補としてありえないので候補から削除\n こうしてlist_possible_ansの中身を削っていく\n : rtype : None\n : return : なし\n ' hit = self.hit for i in self.list_possible_ans[:]: self._check_hit_blow(self.num, i) if (self.hit != hit): self.list_possible_ans.remove(i)<|docstring|>3番目で使用 そのターンに質問で帰ってきたhitの値を保存し, list_possible_ansの解の候補のjのうち self.numとjでhitが一致しないものは答えの"順列"の候補としてありえないので候補から削除 こうしてlist_possible_ansの中身を削っていく : rtype : None : return : なし<|endoftext|>
896e6f596b8fbac92e3980cb32f3395546eac6467366bd2d5904294445e76a70
def _check_hit_blow(self, num, ans) -> None:
    """Score *num* against *ans* and store the result on the instance.

    hit  — positions where both strings carry the same digit.
    blow — digits of *num* that appear in *ans* at a different position.
    Only the first ``self.digits`` characters are compared.  Results are
    written to ``self.hit`` and ``self.blow``.
    """
    hit_count = 0
    blow_count = 0
    for idx in range(self.digits):
        guess_digit = num[idx]
        if guess_digit == ans[idx]:
            hit_count += 1
        elif guess_digit in ans:
            blow_count += 1
    self.hit = hit_count
    self.blow = blow_count
2つのremove関数内で使用 2つの引数を入力し, その2数のhit,blowを計算してself.hit, self.blowに格納 : rtype : None : return : なし
hitblow/class_play_game.py
_check_hit_blow
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _check_hit_blow(self, num, ans) -> None: '2つのremove関数内で使用\n 2つの引数を入力し, その2数のhit,blowを計算してself.hit, self.blowに格納\n : rtype : None\n : return : なし\n ' self.hit = 0 self.blow = 0 for i in range(self.digits): if (num[i] == ans[i]): self.hit += 1 elif (num[i] in ans): self.blow += 1
def _check_hit_blow(self, num, ans) -> None: '2つのremove関数内で使用\n 2つの引数を入力し, その2数のhit,blowを計算してself.hit, self.blowに格納\n : rtype : None\n : return : なし\n ' self.hit = 0 self.blow = 0 for i in range(self.digits): if (num[i] == ans[i]): self.hit += 1 elif (num[i] in ans): self.blow += 1<|docstring|>2つのremove関数内で使用 2つの引数を入力し, その2数のhit,blowを計算してself.hit, self.blowに格納 : rtype : None : return : なし<|endoftext|>
c3ddcfe7d8fc11ae09b35f07b5c952ca62bd1569e8140df9dc0193cc2344a29c
def _identify_number(self) -> None:
    """Main search loop: pin down the answer's digit *combination*.

    Polls the table; on our turn, guesses a random remaining candidate
    from ``self.list_possible_ans_combination``.  When a guess scores
    hit + blow == self.digits, the combination is known: all orderings
    of those digits populate ``self.list_possible_ans`` and control
    moves to :meth:`_identify_permutation` (with an extra pruning pass
    first when the character has evolved, level >= 20).  Otherwise the
    combination candidates are pruned and the loop continues.  Sleeps
    0.5 s between polls; exits when the match ends (room_state == 3).
    """
    print('----------from first3 to 5C----------')
    while True:
        self._get_table_by_API()
        if (self.room_state == 2) and (self.now_player == self.player_name):
            print('{}回目の入力です, 組み合わせの候補は{}通りです.'.format(
                (self.count + 1), len(self.list_possible_ans_combination)))
            self.count += 1
            self.num = random.choice(self.list_possible_ans_combination)
            self._post_guess_by_API()
            self._get_table_by_API()
            self.hit = self.my_history[-1]['hit']
            self.blow = self.my_history[-1]['blow']
            print('-----', self.num)
            print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow))
            if (self.hit + self.blow) == self.digits:
                self.list_ans_combination = list(self.num)
                # BUG FIX: the permutation length was hard-coded to 5;
                # use self.digits so the search works for any digit count
                # (self.digits is the parameter used everywhere else).
                for perm in itertools.permutations(self.list_ans_combination,
                                                   self.digits):
                    self.list_possible_ans.append(''.join(perm))
                print('----------from 5C to 5P----------')
                if st.session_state.level >= 20:
                    self._remove_impossible_permutation()
                self._identify_permutation()
                break
            else:
                self._remove_impossible_combination()
        if self.room_state == 3:
            break
        else:
            time.sleep(0.5)
            continue
3番目に行う(1番のメイン部分) 1秒ごとにget_tableで状態を確認し, 対戦続行中で,自分のターンのとき, list_ans_num_combinationの中からランダムで質問する数字を選んでself.numに格納 post_guessし, 帰ってきたhit,blowをprintし, remove_impossible_combinationを行う hit+blow = 5の(組み合わせが特定出来た)ときは, まずその結果からあり得る数字の順列120通りのlist_possible_ansを作成 次にそのターンのhit,blowからあり得ないものを削除し, 順列を考える次の関数(後述)に移る 自分のターンで無かったら, 0.5秒待機 試合終了だったらループを抜ける : rtype : None : return : なし
hitblow/class_play_game.py
_identify_number
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _identify_number(self) -> None: '3番目に行う(1番のメイン部分)\n 1秒ごとにget_tableで状態を確認し,\n 対戦続行中で,自分のターンのとき, list_ans_num_combinationの中からランダムで質問する数字を選んでself.numに格納\n post_guessし, 帰ってきたhit,blowをprintし, remove_impossible_combinationを行う\n hit+blow = 5の(組み合わせが特定出来た)ときは, まずその結果からあり得る数字の順列120通りのlist_possible_ansを作成\n 次にそのターンのhit,blowからあり得ないものを削除し, 順列を考える次の関数(後述)に移る\n 自分のターンで無かったら, 0.5秒待機\n 試合終了だったらループを抜ける\n : rtype : None\n : return : なし\n ' print('----------from first3 to 5C----------') while True: self._get_table_by_API() if ((self.room_state == 2) and (self.now_player == self.player_name)): print('{}回目の入力です, 組み合わせの候補は{}通りです.'.format((self.count + 1), len(self.list_possible_ans_combination))) self.count += 1 self.num = random.choice(self.list_possible_ans_combination) self._post_guess_by_API() self._get_table_by_API() self.hit = self.my_history[(- 1)]['hit'] self.blow = self.my_history[(- 1)]['blow'] print('-----', self.num) print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow)) if ((self.hit + self.blow) == self.digits): self.list_ans_combination = [i for i in self.num] for i in itertools.permutations(self.list_ans_combination, 5): m = .join(i) self.list_possible_ans.append(m) print('----------from 5C to 5P----------') if (st.session_state.level >= 20): self._remove_impossible_permutation() self._identify_permutation() break else: self._remove_impossible_combination() if (self.room_state == 3): break else: time.sleep(0.5) continue
def _identify_number(self) -> None: '3番目に行う(1番のメイン部分)\n 1秒ごとにget_tableで状態を確認し,\n 対戦続行中で,自分のターンのとき, list_ans_num_combinationの中からランダムで質問する数字を選んでself.numに格納\n post_guessし, 帰ってきたhit,blowをprintし, remove_impossible_combinationを行う\n hit+blow = 5の(組み合わせが特定出来た)ときは, まずその結果からあり得る数字の順列120通りのlist_possible_ansを作成\n 次にそのターンのhit,blowからあり得ないものを削除し, 順列を考える次の関数(後述)に移る\n 自分のターンで無かったら, 0.5秒待機\n 試合終了だったらループを抜ける\n : rtype : None\n : return : なし\n ' print('----------from first3 to 5C----------') while True: self._get_table_by_API() if ((self.room_state == 2) and (self.now_player == self.player_name)): print('{}回目の入力です, 組み合わせの候補は{}通りです.'.format((self.count + 1), len(self.list_possible_ans_combination))) self.count += 1 self.num = random.choice(self.list_possible_ans_combination) self._post_guess_by_API() self._get_table_by_API() self.hit = self.my_history[(- 1)]['hit'] self.blow = self.my_history[(- 1)]['blow'] print('-----', self.num) print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow)) if ((self.hit + self.blow) == self.digits): self.list_ans_combination = [i for i in self.num] for i in itertools.permutations(self.list_ans_combination, 5): m = .join(i) self.list_possible_ans.append(m) print('----------from 5C to 5P----------') if (st.session_state.level >= 20): self._remove_impossible_permutation() self._identify_permutation() break else: self._remove_impossible_combination() if (self.room_state == 3): break else: time.sleep(0.5) continue<|docstring|>3番目に行う(1番のメイン部分) 1秒ごとにget_tableで状態を確認し, 対戦続行中で,自分のターンのとき, list_ans_num_combinationの中からランダムで質問する数字を選んでself.numに格納 post_guessし, 帰ってきたhit,blowをprintし, remove_impossible_combinationを行う hit+blow = 5の(組み合わせが特定出来た)ときは, まずその結果からあり得る数字の順列120通りのlist_possible_ansを作成 次にそのターンのhit,blowからあり得ないものを削除し, 順列を考える次の関数(後述)に移る 自分のターンで無かったら, 0.5秒待機 試合終了だったらループを抜ける : rtype : None : return : なし<|endoftext|>
03bc955f9c3e7c18ca5f48afc8d6b3e294d88c66a4a74c04d2f06a47b18aae8b
def _identify_permutation(self) -> None:
    """Pin down the exact digit *order* once the combination is known.

    Polls the table; on our turn, guesses a random remaining candidate
    from ``self.list_possible_ans``, then prunes orderings whose hit
    count contradicts the reply.  Sleeps 0.5 s between polls; exits when
    the match ends (room_state == 3).
    """
    while True:
        self._get_table_by_API()
        our_turn = (self.room_state == 2
                    and self.now_player == self.player_name)
        if our_turn:
            print('{}回目の入力です, 順列の候補は{}通りです.'.format(
                self.count + 1, len(self.list_possible_ans)))
            self.count += 1
            self.num = random.choice(self.list_possible_ans)
            self._post_guess_by_API()
            self._get_table_by_API()
            last_guess = self.my_history[-1]
            self.hit = last_guess['hit']
            self.blow = last_guess['blow']
            print('-----', self.num)
            print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow))
            self._remove_impossible_permutation()
        if self.room_state == 3:
            break
        time.sleep(0.5)
3番目で使用 1秒ごとにget_tableで状態を確認し, 対戦続行中で,自分のターンのとき, list_ans_numの中からランダムで質問する数字を選んでself.numに格納 post_guessし, 帰ってきたhitをprintし, remove_impossible_ansを行う 自分のターンで無かったら, 0.5秒待機 試合終了だったらループを抜ける : rtype : None : return : なし
hitblow/class_play_game.py
_identify_permutation
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _identify_permutation(self) -> None: '3番目で使用\n 1秒ごとにget_tableで状態を確認し,\n 対戦続行中で,自分のターンのとき, list_ans_numの中からランダムで質問する数字を選んでself.numに格納\n post_guessし, 帰ってきたhitをprintし, remove_impossible_ansを行う\n 自分のターンで無かったら, 0.5秒待機\n 試合終了だったらループを抜ける\n : rtype : None\n : return : なし\n ' while True: self._get_table_by_API() if ((self.room_state == 2) and (self.now_player == self.player_name)): print('{}回目の入力です, 順列の候補は{}通りです.'.format((self.count + 1), len(self.list_possible_ans))) self.count += 1 self.num = random.choice(self.list_possible_ans) self._post_guess_by_API() self._get_table_by_API() self.hit = self.my_history[(- 1)]['hit'] self.blow = self.my_history[(- 1)]['blow'] print('-----', self.num) print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow)) self._remove_impossible_permutation() if (self.room_state == 3): break else: time.sleep(0.5) continue
def _identify_permutation(self) -> None: '3番目で使用\n 1秒ごとにget_tableで状態を確認し,\n 対戦続行中で,自分のターンのとき, list_ans_numの中からランダムで質問する数字を選んでself.numに格納\n post_guessし, 帰ってきたhitをprintし, remove_impossible_ansを行う\n 自分のターンで無かったら, 0.5秒待機\n 試合終了だったらループを抜ける\n : rtype : None\n : return : なし\n ' while True: self._get_table_by_API() if ((self.room_state == 2) and (self.now_player == self.player_name)): print('{}回目の入力です, 順列の候補は{}通りです.'.format((self.count + 1), len(self.list_possible_ans))) self.count += 1 self.num = random.choice(self.list_possible_ans) self._post_guess_by_API() self._get_table_by_API() self.hit = self.my_history[(- 1)]['hit'] self.blow = self.my_history[(- 1)]['blow'] print('-----', self.num) print('!! {} Hit, {} Blow !!'.format(self.hit, self.blow)) self._remove_impossible_permutation() if (self.room_state == 3): break else: time.sleep(0.5) continue<|docstring|>3番目で使用 1秒ごとにget_tableで状態を確認し, 対戦続行中で,自分のターンのとき, list_ans_numの中からランダムで質問する数字を選んでself.numに格納 post_guessし, 帰ってきたhitをprintし, remove_impossible_ansを行う 自分のターンで無かったら, 0.5秒待機 試合終了だったらループを抜ける : rtype : None : return : なし<|endoftext|>
89b3c06449daeda9b225399dad77c66cc3e1b4106bbe707463aea729fbbb9475
def run(self) -> None:
    """Play one full number-guessing match end to end.

    Shows the character's level and game count, enters the room, posts
    our hidden number, runs the opening probes (two probes when evolved,
    level >= 20; otherwise three), then the main search, and finally
    stops the music and renders the results on console and web page.
    """
    st.session_state.col2.subheader('{}の現在のレベル : {}'.format(
        st.session_state.chara_name, st.session_state.level))
    st.session_state.col2.write('対戦回数 : {}'.format(st.session_state.game_count))
    status = st.session_state.col6.empty()
    self._enterroom_and_registerplayer()
    self._post_hidden_number()
    status.write('対戦中・・・')
    evolved = st.session_state.level >= 20
    if evolved:
        self._first_2_times()
        self._make_list_possible_ans_combination()
    else:
        self._first_3_times()
        self._make_list_possible_ans_combination_3()
    self._identify_number()
    status.write('対戦終了!')
    self._music_stop()
    self._show_result_vscode()
    self._show_result_streamlit()
数当てゲーム実行ランナー 対戦中の表示を出してから部屋を作成して答えをポストして対戦開始, 終わったら対戦終了と結果の表示 : rtype : None : return : なし
hitblow/class_play_game.py
run
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def run(self) -> None: ' 数当てゲーム実行ランナー\n 対戦中の表示を出してから部屋を作成して答えをポストして対戦開始, 終わったら対戦終了と結果の表示\n : rtype : None\n : return : なし\n ' st.session_state.col2.subheader('{}の現在のレベル : {}'.format(st.session_state.chara_name, st.session_state.level)) st.session_state.col2.write('対戦回数 : {}'.format(st.session_state.game_count)) place = st.session_state.col6.empty() self._enterroom_and_registerplayer() self._post_hidden_number() place.write('対戦中・・・') if (st.session_state.level >= 20): self._first_2_times() self._make_list_possible_ans_combination() else: self._first_3_times() self._make_list_possible_ans_combination_3() self._identify_number() place.write('対戦終了!') self._music_stop() self._show_result_vscode() self._show_result_streamlit()
def run(self) -> None: ' 数当てゲーム実行ランナー\n 対戦中の表示を出してから部屋を作成して答えをポストして対戦開始, 終わったら対戦終了と結果の表示\n : rtype : None\n : return : なし\n ' st.session_state.col2.subheader('{}の現在のレベル : {}'.format(st.session_state.chara_name, st.session_state.level)) st.session_state.col2.write('対戦回数 : {}'.format(st.session_state.game_count)) place = st.session_state.col6.empty() self._enterroom_and_registerplayer() self._post_hidden_number() place.write('対戦中・・・') if (st.session_state.level >= 20): self._first_2_times() self._make_list_possible_ans_combination() else: self._first_3_times() self._make_list_possible_ans_combination_3() self._identify_number() place.write('対戦終了!') self._music_stop() self._show_result_vscode() self._show_result_streamlit()<|docstring|>数当てゲーム実行ランナー 対戦中の表示を出してから部屋を作成して答えをポストして対戦開始, 終わったら対戦終了と結果の表示 : rtype : None : return : なし<|endoftext|>
87fe9522b43276c41d84b5427560202cd75d46e9322db40c76f31a4b73a093c7
def _show_result_vscode(self) -> None:
    """Fetch the final table and print both players' histories to the console.

    Also records ``my_history``, ``opponent_history`` and ``winner`` on
    the instance from the fetched table data.
    """
    print('--------------------')
    print('対戦終了です.')
    table_url = '{}/rooms/{}/players/{}/table'.format(
        self.url, self.room_id, self.player_name)
    response = session.get(table_url)
    data = response.json()
    self.my_history = data['table']
    self.opponent_history = data['opponent_table']
    history_dumps = (('show opponent history', self.opponent_history),
                     ('show my history', self.my_history))
    for caption, history in history_dumps:
        print('------------------------')
        print(caption)
        print(history)
    self.winner = data['winner']
    print('------------------------')
    print('勝者は{}です. {}回で正解しました!'.format(self.winner, self.count))
    print('終了ターンの解答 : {}'.format(self.num))
    print('------------------------')
対戦終了後, お互いの結果を表示(vscode上に表示する分) : rtype : None : return : なし
hitblow/class_play_game.py
_show_result_vscode
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _show_result_vscode(self) -> None: '対戦終了後, お互いの結果を表示(vscode上に表示する分)\n : rtype : None\n : return : なし\n ' print('--------------------') print('対戦終了です.') url_get_table = (((((self.url + '/rooms/') + str(self.room_id)) + '/players/') + self.player_name) + '/table') result = session.get(url_get_table) data = result.json() self.my_history = data['table'] self.opponent_history = data['opponent_table'] print('------------------------') print('show opponent history') print(self.opponent_history) print('------------------------') print('show my history') print(self.my_history) self.winner = data['winner'] print('------------------------') print('勝者は{}です. {}回で正解しました!'.format(self.winner, self.count)) print('終了ターンの解答 : {}'.format(self.num)) print('------------------------')
def _show_result_vscode(self) -> None: '対戦終了後, お互いの結果を表示(vscode上に表示する分)\n : rtype : None\n : return : なし\n ' print('--------------------') print('対戦終了です.') url_get_table = (((((self.url + '/rooms/') + str(self.room_id)) + '/players/') + self.player_name) + '/table') result = session.get(url_get_table) data = result.json() self.my_history = data['table'] self.opponent_history = data['opponent_table'] print('------------------------') print('show opponent history') print(self.opponent_history) print('------------------------') print('show my history') print(self.my_history) self.winner = data['winner'] print('------------------------') print('勝者は{}です. {}回で正解しました!'.format(self.winner, self.count)) print('終了ターンの解答 : {}'.format(self.num)) print('------------------------')<|docstring|>対戦終了後, お互いの結果を表示(vscode上に表示する分) : rtype : None : return : なし<|endoftext|>
6a572177109d72852ae422ca695d14accf2c37c39faaedce0b2c7f8823647cd9
def _get_information(self) -> tuple:
    """Compute the post-match stats shown on the web page.

    Awards experience by outcome — wins pay more for a longer streak and
    fewer turns, draws and losses pay per turn — then updates the session
    totals and derives the new level from cumulative exp (level ``i``
    covers exp in ``[i**3/3, (i+1)**3/3)``).  Detects level-up and the
    evolution threshold (reaching level 20).

    :return: (gained exp, exp still needed for the next level,
              leveled-up flag, evolved flag)
    """
    # NOTE: return annotation fixed — the original declared ``-> str``
    # but has always returned a 4-tuple.
    if self.winner == self.player_name:
        st.session_state.win_in_a_row += 1
        new_exp = round(
            3000 * (1 + (st.session_state.win_in_a_row - 1) / 4) / self.count)
    elif self.winner is None:  # draw; fixed "== None" -> "is None"
        st.session_state.win_in_a_row = 0
        new_exp = round(20 * self.count)
    else:
        st.session_state.win_in_a_row = 0
        new_exp = round(15 * self.count)
    st.session_state.game_count += 1
    st.session_state.exp += new_exp
    level_up = False
    evolution = False
    # Safe default so we never return an unbound value even if exp ever
    # exceeds the level-200 search range below (was a latent NameError).
    remaining_exp = 0
    for candidate_level in range(200):
        lower = (candidate_level ** 3) / 3
        upper = ((candidate_level + 1) ** 3) / 3
        if lower <= st.session_state.exp < upper:
            remaining_exp = round(upper - st.session_state.exp)
            if candidate_level != st.session_state.level:
                level_up = True
                if candidate_level == 20:
                    evolution = True
            st.session_state.level = candidate_level
            break
    return (new_exp, remaining_exp, level_up, evolution)
対戦終了後,web画面に表示する内容を計算 勝敗,連勝に応じて獲得経験値を求め, 経験値に加える.レベルや次のレベルまでの必要経験値も求める レベルアップと進化の判定も行う : rtype : str : return : 獲得経験値と次のレベルまでの必要経験値
hitblow/class_play_game.py
_get_information
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _get_information(self) -> str: '対戦終了後,web画面に表示する内容を計算\n 勝敗,連勝に応じて獲得経験値を求め, 経験値に加える.レベルや次のレベルまでの必要経験値も求める\n レベルアップと進化の判定も行う\n : rtype : str\n : return : 獲得経験値と次のレベルまでの必要経験値\n ' if (self.winner == self.player_name): st.session_state.win_in_a_row += 1 new_exp = round(((3000 * (1 + ((st.session_state.win_in_a_row - 1) / 4))) / self.count)) elif (self.winner == None): st.session_state.win_in_a_row = 0 new_exp = round((20 * self.count)) else: st.session_state.win_in_a_row = 0 new_exp = round((15 * self.count)) st.session_state.game_count += 1 st.session_state.exp += new_exp level_up = False evolution = False for i in range(200): if ((((i ** 3) / 3) <= st.session_state.exp) and (st.session_state.exp < (((i + 1) ** 3) / 3))): remaining_exp = round(((((i + 1) ** 3) / 3) - st.session_state.exp)) new_level = i if (new_level != st.session_state.level): level_up = True if (new_level == 20): evolution = True st.session_state.level = new_level break return (new_exp, remaining_exp, level_up, evolution)
def _get_information(self) -> str: '対戦終了後,web画面に表示する内容を計算\n 勝敗,連勝に応じて獲得経験値を求め, 経験値に加える.レベルや次のレベルまでの必要経験値も求める\n レベルアップと進化の判定も行う\n : rtype : str\n : return : 獲得経験値と次のレベルまでの必要経験値\n ' if (self.winner == self.player_name): st.session_state.win_in_a_row += 1 new_exp = round(((3000 * (1 + ((st.session_state.win_in_a_row - 1) / 4))) / self.count)) elif (self.winner == None): st.session_state.win_in_a_row = 0 new_exp = round((20 * self.count)) else: st.session_state.win_in_a_row = 0 new_exp = round((15 * self.count)) st.session_state.game_count += 1 st.session_state.exp += new_exp level_up = False evolution = False for i in range(200): if ((((i ** 3) / 3) <= st.session_state.exp) and (st.session_state.exp < (((i + 1) ** 3) / 3))): remaining_exp = round(((((i + 1) ** 3) / 3) - st.session_state.exp)) new_level = i if (new_level != st.session_state.level): level_up = True if (new_level == 20): evolution = True st.session_state.level = new_level break return (new_exp, remaining_exp, level_up, evolution)<|docstring|>対戦終了後,web画面に表示する内容を計算 勝敗,連勝に応じて獲得経験値を求め, 経験値に加える.レベルや次のレベルまでの必要経験値も求める レベルアップと進化の判定も行う : rtype : str : return : 獲得経験値と次のレベルまでの必要経験値<|endoftext|>
a119a5afec6cb510b67af05f29c2ed5c3bbd915e80769d8bd7197aa294ab6f4f
def _show_result_streamlit(self) -> None: '対戦終了後, お互いの結果を表示(web画面上に表示する分)\n 勝敗、連勝数に応じて表示を変える, 経験値やレベル, 対戦回数も表示\n 進化やレベルアップの時は追加のエフェクト\n : rtype : None\n : return : なし\n ' (new_exp, remaining_exp, level_up, evolution) = self._get_information() time.sleep(3) st.session_state.col6.subheader('') if (self.winner == self.player_name): self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/winner.wav')) self._play_song(num=(- 1), title='bgm/winner.wav') st.session_state.col6.subheader('勝利だ,おめでとう!') st.session_state.col6.subheader('正解は‥【{}】'.format(self.num)) st.session_state.col6.subheader('{}回で正解できた!'.format(self.count)) st.session_state.col6.subheader('') if (st.session_state.win_in_a_row >= 2): st.session_state.col6.subheader('すごいぞ,{}連勝だ!この調子!'.format(st.session_state.win_in_a_row)) st.balloons() elif (self.winner == None): self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/draw.wav')) self._play_song(num=(- 1), title='bgm/draw.mp3') st.session_state.col6.subheader('引き分けだ! 
') st.session_state.col6.subheader('正解は‥【{}】'.format(self.num)) st.session_state.col6.subheader('{}回で正解した!'.format(self.count)) else: self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/loser.wav')) self._play_song(num=(- 1), title='bgm/loser.wav') st.session_state.col6.subheader('負けてしまった・・・次は勝とう!') st.session_state.col6.write('{}は{}経験値を得た!'.format(st.session_state.chara_name, new_exp)) st.session_state.col6.write('履歴') st.session_state.col6.write('') st.session_state.col6.write(self.my_history) time.sleep(13) if level_up: self._music_stop() if evolution: st.session_state.col4.subheader('おや?{}の様子が...'.format(st.session_state.chara_name)) image_light = Image.open('picture/evolution_light.png') st.session_state.col4.image(image_light) self._play_song(num=1, title='bgm/evolution_light.mp3') time.sleep(3) st.session_state.col6.subheader('やったね, 進化した!') pic_url2 = (('picture/' + st.session_state.chara_name) + '-2.jpg') image = Image.open(pic_url2) st.session_state.col4.image(image) img = Image.open('picture/evolution.gif') st.session_state.col6.image(img) self._play_song(num=1, title='bgm/evolution.mp3') time.sleep(3) else: st.session_state.col6.subheader('レベルアップだ!') self._play_song(num=1, title='bgm/level_up.wav') img = Image.open('picture/level-up.gif') time.sleep(1) st.session_state.col6.image(img) time.sleep(1) st.session_state.col6.write('次のレベルまでの経験値:{}'.format(remaining_exp)) st.session_state.col6.write('今まで得た合計経験値:{}'.format(st.session_state.exp)) st.session_state.col6.subheader('') st.session_state.col6.subheader('{}の現在のレベル : {}'.format(st.session_state.chara_name, st.session_state.level)) st.session_state.col6.write('対戦回数 : {}'.format(st.session_state.game_count))
対戦終了後, お互いの結果を表示(web画面上に表示する分) 勝敗、連勝数に応じて表示を変える, 経験値やレベル, 対戦回数も表示 進化やレベルアップの時は追加のエフェクト : rtype : None : return : なし
hitblow/class_play_game.py
_show_result_streamlit
HayatoNatural/NEDO-Hit-Blow-teamF
1
python
def _show_result_streamlit(self) -> None: '対戦終了後, お互いの結果を表示(web画面上に表示する分)\n 勝敗、連勝数に応じて表示を変える, 経験値やレベル, 対戦回数も表示\n 進化やレベルアップの時は追加のエフェクト\n : rtype : None\n : return : なし\n ' (new_exp, remaining_exp, level_up, evolution) = self._get_information() time.sleep(3) st.session_state.col6.subheader() if (self.winner == self.player_name): self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/winner.wav')) self._play_song(num=(- 1), title='bgm/winner.wav') st.session_state.col6.subheader('勝利だ,おめでとう!') st.session_state.col6.subheader('正解は‥【{}】'.format(self.num)) st.session_state.col6.subheader('{}回で正解できた!'.format(self.count)) st.session_state.col6.subheader() if (st.session_state.win_in_a_row >= 2): st.session_state.col6.subheader('すごいぞ,{}連勝だ!この調子!'.format(st.session_state.win_in_a_row)) st.balloons() elif (self.winner == None): self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/draw.wav')) self._play_song(num=(- 1), title='bgm/draw.mp3') st.session_state.col6.subheader('引き分けだ! 
') st.session_state.col6.subheader('正解は‥【{}】'.format(self.num)) st.session_state.col6.subheader('{}回で正解した!'.format(self.count)) else: self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/loser.wav')) self._play_song(num=(- 1), title='bgm/loser.wav') st.session_state.col6.subheader('負けてしまった・・・次は勝とう!') st.session_state.col6.write('{}は{}経験値を得た!'.format(st.session_state.chara_name, new_exp)) st.session_state.col6.write('履歴') st.session_state.col6.write() st.session_state.col6.write(self.my_history) time.sleep(13) if level_up: self._music_stop() if evolution: st.session_state.col4.subheader('おや?{}の様子が...'.format(st.session_state.chara_name)) image_light = Image.open('picture/evolution_light.png') st.session_state.col4.image(image_light) self._play_song(num=1, title='bgm/evolution_light.mp3') time.sleep(3) st.session_state.col6.subheader('やったね, 進化した!') pic_url2 = (('picture/' + st.session_state.chara_name) + '-2.jpg') image = Image.open(pic_url2) st.session_state.col4.image(image) img = Image.open('picture/evolution.gif') st.session_state.col6.image(img) self._play_song(num=1, title='bgm/evolution.mp3') time.sleep(3) else: st.session_state.col6.subheader('レベルアップだ!') self._play_song(num=1, title='bgm/level_up.wav') img = Image.open('picture/level-up.gif') time.sleep(1) st.session_state.col6.image(img) time.sleep(1) st.session_state.col6.write('次のレベルまでの経験値:{}'.format(remaining_exp)) st.session_state.col6.write('今まで得た合計経験値:{}'.format(st.session_state.exp)) st.session_state.col6.subheader() st.session_state.col6.subheader('{}の現在のレベル : {}'.format(st.session_state.chara_name, st.session_state.level)) st.session_state.col6.write('対戦回数 : {}'.format(st.session_state.game_count))
def _show_result_streamlit(self) -> None: '対戦終了後, お互いの結果を表示(web画面上に表示する分)\n 勝敗、連勝数に応じて表示を変える, 経験値やレベル, 対戦回数も表示\n 進化やレベルアップの時は追加のエフェクト\n : rtype : None\n : return : なし\n ' (new_exp, remaining_exp, level_up, evolution) = self._get_information() time.sleep(3) st.session_state.col6.subheader() if (self.winner == self.player_name): self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/winner.wav')) self._play_song(num=(- 1), title='bgm/winner.wav') st.session_state.col6.subheader('勝利だ,おめでとう!') st.session_state.col6.subheader('正解は‥【{}】'.format(self.num)) st.session_state.col6.subheader('{}回で正解できた!'.format(self.count)) st.session_state.col6.subheader() if (st.session_state.win_in_a_row >= 2): st.session_state.col6.subheader('すごいぞ,{}連勝だ!この調子!'.format(st.session_state.win_in_a_row)) st.balloons() elif (self.winner == None): self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/draw.wav')) self._play_song(num=(- 1), title='bgm/draw.mp3') st.session_state.col6.subheader('引き分けだ! 
') st.session_state.col6.subheader('正解は‥【{}】'.format(self.num)) st.session_state.col6.subheader('{}回で正解した!'.format(self.count)) else: self._sound_play(num=1, title=(('voice/' + st.session_state.chara_name) + '/loser.wav')) self._play_song(num=(- 1), title='bgm/loser.wav') st.session_state.col6.subheader('負けてしまった・・・次は勝とう!') st.session_state.col6.write('{}は{}経験値を得た!'.format(st.session_state.chara_name, new_exp)) st.session_state.col6.write('履歴') st.session_state.col6.write() st.session_state.col6.write(self.my_history) time.sleep(13) if level_up: self._music_stop() if evolution: st.session_state.col4.subheader('おや?{}の様子が...'.format(st.session_state.chara_name)) image_light = Image.open('picture/evolution_light.png') st.session_state.col4.image(image_light) self._play_song(num=1, title='bgm/evolution_light.mp3') time.sleep(3) st.session_state.col6.subheader('やったね, 進化した!') pic_url2 = (('picture/' + st.session_state.chara_name) + '-2.jpg') image = Image.open(pic_url2) st.session_state.col4.image(image) img = Image.open('picture/evolution.gif') st.session_state.col6.image(img) self._play_song(num=1, title='bgm/evolution.mp3') time.sleep(3) else: st.session_state.col6.subheader('レベルアップだ!') self._play_song(num=1, title='bgm/level_up.wav') img = Image.open('picture/level-up.gif') time.sleep(1) st.session_state.col6.image(img) time.sleep(1) st.session_state.col6.write('次のレベルまでの経験値:{}'.format(remaining_exp)) st.session_state.col6.write('今まで得た合計経験値:{}'.format(st.session_state.exp)) st.session_state.col6.subheader() st.session_state.col6.subheader('{}の現在のレベル : {}'.format(st.session_state.chara_name, st.session_state.level)) st.session_state.col6.write('対戦回数 : {}'.format(st.session_state.game_count))<|docstring|>対戦終了後, お互いの結果を表示(web画面上に表示する分) 勝敗、連勝数に応じて表示を変える, 経験値やレベル, 対戦回数も表示 進化やレベルアップの時は追加のエフェクト : rtype : None : return : なし<|endoftext|>
b79b23637c55ec812148510dc30e48e8d3300735444967d1bbb28aa878f7971a
def _refresh_thrift_client(self): 'Refresh the Thrift socket, transport, and client.' socket = TSSLSocket(self.host, self.port, False, KEY_FILE, KEY_FILE, KEY_FILE) if (self.timeout is not None): socket.setTimeout(self.timeout) self.transport = self._transport_class(socket) protocol = self._protocol_class(self.transport) self.client = Hbase.Client(protocol)
Refresh the Thrift socket, transport, and client.
python/thrift/appengine-ssl-gateway/appengine/bigtable.py
_refresh_thrift_client
hgonggg/cloud-bigtable-examples
229
python
def _refresh_thrift_client(self): socket = TSSLSocket(self.host, self.port, False, KEY_FILE, KEY_FILE, KEY_FILE) if (self.timeout is not None): socket.setTimeout(self.timeout) self.transport = self._transport_class(socket) protocol = self._protocol_class(self.transport) self.client = Hbase.Client(protocol)
def _refresh_thrift_client(self): socket = TSSLSocket(self.host, self.port, False, KEY_FILE, KEY_FILE, KEY_FILE) if (self.timeout is not None): socket.setTimeout(self.timeout) self.transport = self._transport_class(socket) protocol = self._protocol_class(self.transport) self.client = Hbase.Client(protocol)<|docstring|>Refresh the Thrift socket, transport, and client.<|endoftext|>
bb659380b1cbd9a262d54e3c9d5c9af251fe7788ce7788a3ec1d72d306254b60
def __init__(self, host, port, user, password, database): '\n [summary]: Constructor\n Args:\n host ([type]): [description]\n port ([type]): [description]\n user ([type]): [description]\n password ([type]): [description]\n database ([type]): [description]\n ' self.host = host self.port = port self.user = user self.password = password self.database = database self.isconnected = False self.connection = None dbconfig = {'host': host, 'port': port, 'user': user, 'password': password, 'database': database} self.pool = self.create_pool(dbconfig, 'auto_neuron_pool', 3)
[summary]: Constructor Args: host ([type]): [description] port ([type]): [description] user ([type]): [description] password ([type]): [description] database ([type]): [description]
src/utils/databases/mysql_helper.py
__init__
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def __init__(self, host, port, user, password, database): '\n [summary]: Constructor\n Args:\n host ([type]): [description]\n port ([type]): [description]\n user ([type]): [description]\n password ([type]): [description]\n database ([type]): [description]\n ' self.host = host self.port = port self.user = user self.password = password self.database = database self.isconnected = False self.connection = None dbconfig = {'host': host, 'port': port, 'user': user, 'password': password, 'database': database} self.pool = self.create_pool(dbconfig, 'auto_neuron_pool', 3)
def __init__(self, host, port, user, password, database): '\n [summary]: Constructor\n Args:\n host ([type]): [description]\n port ([type]): [description]\n user ([type]): [description]\n password ([type]): [description]\n database ([type]): [description]\n ' self.host = host self.port = port self.user = user self.password = password self.database = database self.isconnected = False self.connection = None dbconfig = {'host': host, 'port': port, 'user': user, 'password': password, 'database': database} self.pool = self.create_pool(dbconfig, 'auto_neuron_pool', 3)<|docstring|>[summary]: Constructor Args: host ([type]): [description] port ([type]): [description] user ([type]): [description] password ([type]): [description] database ([type]): [description]<|endoftext|>
3ce9461bd8054f6ed016d8abf04e765a4815bd871d48b21ce1057cd1358e1f96
def create_pool(self, dbconfig, pool_name='mypool', pool_size=3): '[summary]\n Create a connection pool, after created, the request of connecting \n MySQL could get a connection from this pool instead of request to \n create a connection.\n Args:\n pool_name (str, optional): [description]. Defaults to "mypool".\n pool_size (int, optional): [description]. Defaults to 3.\n\n Returns:\n [type]: [description]\n ' pool = mysql.connector.pooling.MySQLConnectionPool(pool_name=pool_name, pool_size=pool_size, pool_reset_session=True, **dbconfig) return pool
[summary] Create a connection pool, after created, the request of connecting MySQL could get a connection from this pool instead of request to create a connection. Args: pool_name (str, optional): [description]. Defaults to "mypool". pool_size (int, optional): [description]. Defaults to 3. Returns: [type]: [description]
src/utils/databases/mysql_helper.py
create_pool
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def create_pool(self, dbconfig, pool_name='mypool', pool_size=3): '[summary]\n Create a connection pool, after created, the request of connecting \n MySQL could get a connection from this pool instead of request to \n create a connection.\n Args:\n pool_name (str, optional): [description]. Defaults to "mypool".\n pool_size (int, optional): [description]. Defaults to 3.\n\n Returns:\n [type]: [description]\n ' pool = mysql.connector.pooling.MySQLConnectionPool(pool_name=pool_name, pool_size=pool_size, pool_reset_session=True, **dbconfig) return pool
def create_pool(self, dbconfig, pool_name='mypool', pool_size=3): '[summary]\n Create a connection pool, after created, the request of connecting \n MySQL could get a connection from this pool instead of request to \n create a connection.\n Args:\n pool_name (str, optional): [description]. Defaults to "mypool".\n pool_size (int, optional): [description]. Defaults to 3.\n\n Returns:\n [type]: [description]\n ' pool = mysql.connector.pooling.MySQLConnectionPool(pool_name=pool_name, pool_size=pool_size, pool_reset_session=True, **dbconfig) return pool<|docstring|>[summary] Create a connection pool, after created, the request of connecting MySQL could get a connection from this pool instead of request to create a connection. Args: pool_name (str, optional): [description]. Defaults to "mypool". pool_size (int, optional): [description]. Defaults to 3. Returns: [type]: [description]<|endoftext|>
5311783567d3671162a96d0858ed527b024345cab1f6c0c2161c7378d02632ed
def close(self, conn, cursor): '\n A method used to close connection of mysql.\n :param conn: \n :param cursor: \n :return: \n ' if (cursor is not None): cursor.close() if (conn is not None): conn.close()
A method used to close connection of mysql. :param conn: :param cursor: :return:
src/utils/databases/mysql_helper.py
close
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def close(self, conn, cursor): '\n A method used to close connection of mysql.\n :param conn: \n :param cursor: \n :return: \n ' if (cursor is not None): cursor.close() if (conn is not None): conn.close()
def close(self, conn, cursor): '\n A method used to close connection of mysql.\n :param conn: \n :param cursor: \n :return: \n ' if (cursor is not None): cursor.close() if (conn is not None): conn.close()<|docstring|>A method used to close connection of mysql. :param conn: :param cursor: :return:<|endoftext|>
13ae75508d5f322e61415f91bed438497197babf468dd2f5adbd0f8fddbe8445
def fetch_all(self, query): '\n [summary]: This function will return all record from table\n Args:\n query ([type]): [Select tabel query]\n\n Returns:\n [type]: [description]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) data = cursor.fetchall() return data except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)
[summary]: This function will return all record from table Args: query ([type]): [Select tabel query] Returns: [type]: [description]
src/utils/databases/mysql_helper.py
fetch_all
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def fetch_all(self, query): '\n [summary]: This function will return all record from table\n Args:\n query ([type]): [Select tabel query]\n\n Returns:\n [type]: [description]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) data = cursor.fetchall() return data except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)
def fetch_all(self, query): '\n [summary]: This function will return all record from table\n Args:\n query ([type]): [Select tabel query]\n\n Returns:\n [type]: [description]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) data = cursor.fetchall() return data except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)<|docstring|>[summary]: This function will return all record from table Args: query ([type]): [Select tabel query] Returns: [type]: [description]<|endoftext|>
75c0cf3e382bf82d1060ea6d2dc95b23ea2c0f3206bdd4f07fca9d21de262144
def fetch_one(self, query): '\n [summary]:This method return single record from table\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [Data]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) data = cursor.fetchone() return data except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)
[summary]:This method return single record from table Args: query ([type]): [Query to execute] Returns: [type]: [Data]
src/utils/databases/mysql_helper.py
fetch_one
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def fetch_one(self, query): '\n [summary]:This method return single record from table\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [Data]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) data = cursor.fetchone() return data except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)
def fetch_one(self, query): '\n [summary]:This method return single record from table\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [Data]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) data = cursor.fetchone() return data except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)<|docstring|>[summary]:This method return single record from table Args: query ([type]): [Query to execute] Returns: [type]: [Data]<|endoftext|>
fefed4cdd3f79365f999fe4a715914bdb0ca7c18bd29874d3caa159912fb8ebe
def delete_record(self, query): '\n [summary]: Function to delete record from table single or multiple\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [No of row effected]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount conn.commit() self.close(conn, cursor) return rowcount except connector.Error as error: logger.error('Error: {}'.format(error))
[summary]: Function to delete record from table single or multiple Args: query ([type]): [Query to execute] Returns: [type]: [No of row effected]
src/utils/databases/mysql_helper.py
delete_record
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def delete_record(self, query): '\n [summary]: Function to delete record from table single or multiple\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [No of row effected]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount conn.commit() self.close(conn, cursor) return rowcount except connector.Error as error: logger.error('Error: {}'.format(error))
def delete_record(self, query): '\n [summary]: Function to delete record from table single or multiple\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [No of row effected]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount conn.commit() self.close(conn, cursor) return rowcount except connector.Error as error: logger.error('Error: {}'.format(error))<|docstring|>[summary]: Function to delete record from table single or multiple Args: query ([type]): [Query to execute] Returns: [type]: [No of row effected]<|endoftext|>
4f0d7306e13dc9d707d25c8c8c42f2f03b02cadc752866ff734f2599641b7621
def update_record(self, query): '\n [summary]: Function to delete record from table single or multiple\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [No of row effected]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount return rowcount except connector.Error as error: logger.error('Error: {}'.format(error)) finally: conn.commit() self.close(conn, cursor)
[summary]: Function to delete record from table single or multiple Args: query ([type]): [Query to execute] Returns: [type]: [No of row effected]
src/utils/databases/mysql_helper.py
update_record
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def update_record(self, query): '\n [summary]: Function to delete record from table single or multiple\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [No of row effected]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount return rowcount except connector.Error as error: logger.error('Error: {}'.format(error)) finally: conn.commit() self.close(conn, cursor)
def update_record(self, query): '\n [summary]: Function to delete record from table single or multiple\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [No of row effected]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount return rowcount except connector.Error as error: logger.error('Error: {}'.format(error)) finally: conn.commit() self.close(conn, cursor)<|docstring|>[summary]: Function to delete record from table single or multiple Args: query ([type]): [Query to execute] Returns: [type]: [No of row effected]<|endoftext|>
392d9c7f219480ae35ce8c352dd0913a2f25f7b469381eac1c5a6e625a2c218d
def insert_record(self, query): '\n [summary]:Insert record into table\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [1 if row inserted or 0 if not]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount conn.commit() return rowcount except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)
[summary]:Insert record into table Args: query ([type]): [Query to execute] Returns: [type]: [1 if row inserted or 0 if not]
src/utils/databases/mysql_helper.py
insert_record
Supreeth-Shetty/Projectathon---Simplified-AI
8
python
def insert_record(self, query): '\n [summary]:Insert record into table\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [1 if row inserted or 0 if not]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount conn.commit() return rowcount except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)
def insert_record(self, query): '\n [summary]:Insert record into table\n Args:\n query ([type]): [Query to execute]\n\n Returns:\n [type]: [1 if row inserted or 0 if not]\n ' conn = None cursor = None try: conn = self.pool.get_connection() cursor = conn.cursor() cursor.execute(query) rowcount = cursor.rowcount conn.commit() return rowcount except connector.Error as error: logger.error('Error: {}'.format(error)) finally: self.close(conn, cursor)<|docstring|>[summary]:Insert record into table Args: query ([type]): [Query to execute] Returns: [type]: [1 if row inserted or 0 if not]<|endoftext|>
62a616b82935de9a54dd8e286f5dc35005600b31e80ce7596c16def2ed0716d5
def test_ch_encoder(): '\n Tests the encoding of the channel encoder\n ' config = ConfigManager() config.set('types', 'msg_len', 'U16') enc = ChEncoder() enc_config = ChEncoder(config) temp = ChTemplate(101, 'test_ch', 'test_comp', U32Type()) time_obj = TimeType(2, 0, 1533758629, 123456) ch_obj = ChData(U32Type(42), time_obj, temp) desc_bin = b'\x00\x00\x00\x01' id_bin = b'\x00\x00\x00e' time_bin = b'\x00\x02\x00[kL\xa5\x00\x01\xe2@' val_bin = b'\x00\x00\x00*' long_len_bin = b'\x00\x00\x00\x17' short_len_bin = b'\x00\x17' reg_expected = ((((long_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) config_expected = ((((short_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) reg_output = enc.encode_api(ch_obj) assert (reg_output == reg_expected), ('FAIL: expected regular output to be %s, but found %s' % (list(reg_expected), list(reg_output))) config_output = enc_config.encode_api(ch_obj) assert (config_output == config_expected), ('FAIL: expected configured output to be %s, but found %s' % (list(config_expected), list(config_output))) temp = ChTemplate(102, 'test_ch2', 'test_comp2', U16Type()) time_obj = TimeType(2, 0, 1533758628, 123457) ch_obj = ChData(U16Type(40), time_obj, temp) desc_bin = b'\x00\x00\x00\x01' id_bin = b'\x00\x00\x00f' time_bin = b'\x00\x02\x00[kL\xa4\x00\x01\xe2A' val_bin = b'\x00(' long_len_bin = b'\x00\x00\x00\x15' short_len_bin = b'\x00\x15' reg_expected = ((((long_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) config_expected = ((((short_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) reg_output = enc.encode_api(ch_obj) assert (reg_output == reg_expected), ('FAIL: expected regular output to be %s, but found %s' % (list(reg_expected), list(reg_output))) config_output = enc_config.encode_api(ch_obj) assert (config_output == config_expected), ('FAIL: expected configured output to be %s, but found %s' % (list(config_expected), list(config_output)))
Tests the encoding of the channel encoder
Gds/test/fprime_gds/common/encoders/test_ch_encoder.py
test_ch_encoder
Serwios/fprime
4
python
def test_ch_encoder(): '\n \n ' config = ConfigManager() config.set('types', 'msg_len', 'U16') enc = ChEncoder() enc_config = ChEncoder(config) temp = ChTemplate(101, 'test_ch', 'test_comp', U32Type()) time_obj = TimeType(2, 0, 1533758629, 123456) ch_obj = ChData(U32Type(42), time_obj, temp) desc_bin = b'\x00\x00\x00\x01' id_bin = b'\x00\x00\x00e' time_bin = b'\x00\x02\x00[kL\xa5\x00\x01\xe2@' val_bin = b'\x00\x00\x00*' long_len_bin = b'\x00\x00\x00\x17' short_len_bin = b'\x00\x17' reg_expected = ((((long_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) config_expected = ((((short_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) reg_output = enc.encode_api(ch_obj) assert (reg_output == reg_expected), ('FAIL: expected regular output to be %s, but found %s' % (list(reg_expected), list(reg_output))) config_output = enc_config.encode_api(ch_obj) assert (config_output == config_expected), ('FAIL: expected configured output to be %s, but found %s' % (list(config_expected), list(config_output))) temp = ChTemplate(102, 'test_ch2', 'test_comp2', U16Type()) time_obj = TimeType(2, 0, 1533758628, 123457) ch_obj = ChData(U16Type(40), time_obj, temp) desc_bin = b'\x00\x00\x00\x01' id_bin = b'\x00\x00\x00f' time_bin = b'\x00\x02\x00[kL\xa4\x00\x01\xe2A' val_bin = b'\x00(' long_len_bin = b'\x00\x00\x00\x15' short_len_bin = b'\x00\x15' reg_expected = ((((long_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) config_expected = ((((short_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) reg_output = enc.encode_api(ch_obj) assert (reg_output == reg_expected), ('FAIL: expected regular output to be %s, but found %s' % (list(reg_expected), list(reg_output))) config_output = enc_config.encode_api(ch_obj) assert (config_output == config_expected), ('FAIL: expected configured output to be %s, but found %s' % (list(config_expected), list(config_output)))
def test_ch_encoder(): '\n \n ' config = ConfigManager() config.set('types', 'msg_len', 'U16') enc = ChEncoder() enc_config = ChEncoder(config) temp = ChTemplate(101, 'test_ch', 'test_comp', U32Type()) time_obj = TimeType(2, 0, 1533758629, 123456) ch_obj = ChData(U32Type(42), time_obj, temp) desc_bin = b'\x00\x00\x00\x01' id_bin = b'\x00\x00\x00e' time_bin = b'\x00\x02\x00[kL\xa5\x00\x01\xe2@' val_bin = b'\x00\x00\x00*' long_len_bin = b'\x00\x00\x00\x17' short_len_bin = b'\x00\x17' reg_expected = ((((long_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) config_expected = ((((short_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) reg_output = enc.encode_api(ch_obj) assert (reg_output == reg_expected), ('FAIL: expected regular output to be %s, but found %s' % (list(reg_expected), list(reg_output))) config_output = enc_config.encode_api(ch_obj) assert (config_output == config_expected), ('FAIL: expected configured output to be %s, but found %s' % (list(config_expected), list(config_output))) temp = ChTemplate(102, 'test_ch2', 'test_comp2', U16Type()) time_obj = TimeType(2, 0, 1533758628, 123457) ch_obj = ChData(U16Type(40), time_obj, temp) desc_bin = b'\x00\x00\x00\x01' id_bin = b'\x00\x00\x00f' time_bin = b'\x00\x02\x00[kL\xa4\x00\x01\xe2A' val_bin = b'\x00(' long_len_bin = b'\x00\x00\x00\x15' short_len_bin = b'\x00\x15' reg_expected = ((((long_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) config_expected = ((((short_len_bin + desc_bin) + id_bin) + time_bin) + val_bin) reg_output = enc.encode_api(ch_obj) assert (reg_output == reg_expected), ('FAIL: expected regular output to be %s, but found %s' % (list(reg_expected), list(reg_output))) config_output = enc_config.encode_api(ch_obj) assert (config_output == config_expected), ('FAIL: expected configured output to be %s, but found %s' % (list(config_expected), list(config_output)))<|docstring|>Tests the encoding of the channel encoder<|endoftext|>
4e50315eb9d11ffe0cb916a66476308e098114c1dba089eab5f0a02fbec068d1
def paste(image: Image, position: tuple=(0, 0)) -> None: '\n Paste an image onto the buffer\n :param image: Image to paste\n :param position: tuple position to paste at\n :return: None\n ' image.paste(image, position)
Paste an image onto the buffer :param image: Image to paste :param position: tuple position to paste at :return: None
libs/functions.py
paste
HcNguyen111/eReader
7
python
def paste(image: Image, position: tuple=(0, 0)) -> None: '\n Paste an image onto the buffer\n :param image: Image to paste\n :param position: tuple position to paste at\n :return: None\n ' image.paste(image, position)
def paste(image: Image, position: tuple=(0, 0)) -> None: '\n Paste an image onto the buffer\n :param image: Image to paste\n :param position: tuple position to paste at\n :return: None\n ' image.paste(image, position)<|docstring|>Paste an image onto the buffer :param image: Image to paste :param position: tuple position to paste at :return: None<|endoftext|>
6e44785d059a889797568e0b0296641d5db3118833e7e0a2e306b857efe8ef7b
def create_data_directory(version: str, force: bool=False) -> None: "\n Create subdirectories.\n\n 1. This function will check if `version` is a valid semver.\n 2. Create a directory that contains a structure if it doesn't already exist.\n Unless `force=True`.\n\n Args:\n version (str): Semver for the dataset, model and metrics.\n force (bool, optional): Flag to overwrite existing directory. Defaults to False.\n " semver.VersionInfo.parse(version) base_module_path = os.path.join(const.DATA, version) depth_level_1 = [const.CLASSIFICATION, const.NER] depth_level_2 = [const.DATASETS, const.METRICS, const.MODELS] for subdir in depth_level_1: for childdir in depth_level_2: os.makedirs(os.path.join(base_module_path, subdir, childdir), exist_ok=force)
Create subdirectories. 1. This function will check if `version` is a valid semver. 2. Create a directory that contains a structure if it doesn't already exist. Unless `force=True`. Args: version (str): Semver for the dataset, model and metrics. force (bool, optional): Flag to overwrite existing directory. Defaults to False.
slu/slu/dev/dir_setup.py
create_data_directory
Vernacular-ai/slu-service
0
python
def create_data_directory(version: str, force: bool=False) -> None: "\n Create subdirectories.\n\n 1. This function will check if `version` is a valid semver.\n 2. Create a directory that contains a structure if it doesn't already exist.\n Unless `force=True`.\n\n Args:\n version (str): Semver for the dataset, model and metrics.\n force (bool, optional): Flag to overwrite existing directory. Defaults to False.\n " semver.VersionInfo.parse(version) base_module_path = os.path.join(const.DATA, version) depth_level_1 = [const.CLASSIFICATION, const.NER] depth_level_2 = [const.DATASETS, const.METRICS, const.MODELS] for subdir in depth_level_1: for childdir in depth_level_2: os.makedirs(os.path.join(base_module_path, subdir, childdir), exist_ok=force)
def create_data_directory(version: str, force: bool=False) -> None: "\n Create subdirectories.\n\n 1. This function will check if `version` is a valid semver.\n 2. Create a directory that contains a structure if it doesn't already exist.\n Unless `force=True`.\n\n Args:\n version (str): Semver for the dataset, model and metrics.\n force (bool, optional): Flag to overwrite existing directory. Defaults to False.\n " semver.VersionInfo.parse(version) base_module_path = os.path.join(const.DATA, version) depth_level_1 = [const.CLASSIFICATION, const.NER] depth_level_2 = [const.DATASETS, const.METRICS, const.MODELS] for subdir in depth_level_1: for childdir in depth_level_2: os.makedirs(os.path.join(base_module_path, subdir, childdir), exist_ok=force)<|docstring|>Create subdirectories. 1. This function will check if `version` is a valid semver. 2. Create a directory that contains a structure if it doesn't already exist. Unless `force=True`. Args: version (str): Semver for the dataset, model and metrics. force (bool, optional): Flag to overwrite existing directory. Defaults to False.<|endoftext|>
59dafb8e121e42753dc9ec0d75fde206211dcdda7a7f9017ecd7075b12172bb8
def copy_data_directory(copy_from: str, copy_to: str, force: bool=False) -> None: "\n Copy subdirectory.\n\n 1. This function will check `copy_from` and `copy_to` are valid semver.\n 2. This function will check `copy_to` doesn't already exist.\n 3. Unless force = True, this function will not overwrite existing directory.\n\n Args:\n copy_from (str): semver -> Source directory.\n copy_to (str): semver -> Destination directory.\n force (bool, optional): Flag to overwrite existing directory. Defaults to False.\n " semver.VersionInfo.parse(copy_from) semver.VersionInfo.parse(copy_to) source = os.path.join(const.DATA, copy_from) destination = os.path.join(const.DATA, copy_to) shutil.copytree(source, destination, dirs_exist_ok=force)
Copy subdirectory. 1. This function will check `copy_from` and `copy_to` are valid semver. 2. This function will check `copy_to` doesn't already exist. 3. Unless force = True, this function will not overwrite existing directory. Args: copy_from (str): semver -> Source directory. copy_to (str): semver -> Destination directory. force (bool, optional): Flag to overwrite existing directory. Defaults to False.
slu/slu/dev/dir_setup.py
copy_data_directory
Vernacular-ai/slu-service
0
python
def copy_data_directory(copy_from: str, copy_to: str, force: bool=False) -> None: "\n Copy subdirectory.\n\n 1. This function will check `copy_from` and `copy_to` are valid semver.\n 2. This function will check `copy_to` doesn't already exist.\n 3. Unless force = True, this function will not overwrite existing directory.\n\n Args:\n copy_from (str): semver -> Source directory.\n copy_to (str): semver -> Destination directory.\n force (bool, optional): Flag to overwrite existing directory. Defaults to False.\n " semver.VersionInfo.parse(copy_from) semver.VersionInfo.parse(copy_to) source = os.path.join(const.DATA, copy_from) destination = os.path.join(const.DATA, copy_to) shutil.copytree(source, destination, dirs_exist_ok=force)
def copy_data_directory(copy_from: str, copy_to: str, force: bool=False) -> None: "\n Copy subdirectory.\n\n 1. This function will check `copy_from` and `copy_to` are valid semver.\n 2. This function will check `copy_to` doesn't already exist.\n 3. Unless force = True, this function will not overwrite existing directory.\n\n Args:\n copy_from (str): semver -> Source directory.\n copy_to (str): semver -> Destination directory.\n force (bool, optional): Flag to overwrite existing directory. Defaults to False.\n " semver.VersionInfo.parse(copy_from) semver.VersionInfo.parse(copy_to) source = os.path.join(const.DATA, copy_from) destination = os.path.join(const.DATA, copy_to) shutil.copytree(source, destination, dirs_exist_ok=force)<|docstring|>Copy subdirectory. 1. This function will check `copy_from` and `copy_to` are valid semver. 2. This function will check `copy_to` doesn't already exist. 3. Unless force = True, this function will not overwrite existing directory. Args: copy_from (str): semver -> Source directory. copy_to (str): semver -> Destination directory. force (bool, optional): Flag to overwrite existing directory. Defaults to False.<|endoftext|>
6c4af083c326fd156a096e7eca6c9f8872543558648899bb035e817129f54ba6
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.describe_addresses`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddresses>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'Addresses': [\n {\n 'AddressId': 'string',\n 'Name': 'string',\n 'Company': 'string',\n 'Street1': 'string',\n 'Street2': 'string',\n 'Street3': 'string',\n 'City': 'string',\n 'StateOrProvince': 'string',\n 'PrefectureOrDistrict': 'string',\n 'Landmark': 'string',\n 'Country': 'string',\n 'PostalCode': 'string',\n 'PhoneNumber': 'string',\n 'IsRestricted': True|False\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **Addresses** *(list) --* \n The Snowball shipping addresses that were created for this account.\n - *(dict) --* \n The address that you want the Snowball or Snowballs associated with a specific job to be shipped to. Addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. 
Although no individual elements of the ``Address`` are required, if the address is invalid or unsupported, then an exception is thrown.\n - **AddressId** *(string) --* \n The unique ID for an address.\n - **Name** *(string) --* \n The name of a person to receive a Snowball at an address.\n - **Company** *(string) --* \n The name of the company to receive a Snowball at an address.\n - **Street1** *(string) --* \n The first line in a street address that a Snowball is to be delivered to.\n - **Street2** *(string) --* \n The second line in a street address that a Snowball is to be delivered to.\n - **Street3** *(string) --* \n The third line in a street address that a Snowball is to be delivered to.\n - **City** *(string) --* \n The city in an address that a Snowball is to be delivered to.\n - **StateOrProvince** *(string) --* \n The state or province in an address that a Snowball is to be delivered to.\n - **PrefectureOrDistrict** *(string) --* \n This field is no longer used and the value is ignored.\n - **Landmark** *(string) --* \n This field is no longer used and the value is ignored.\n - **Country** *(string) --* \n The country in an address that a Snowball is to be delivered to.\n - **PostalCode** *(string) --* \n The postal code in an address that a Snowball is to be delivered to.\n - **PhoneNumber** *(string) --* \n The phone number associated with an address that a Snowball is to be delivered to.\n - **IsRestricted** *(boolean) --* \n If the address you are creating is a primary address, then set this option to true. This field is not supported in most regions.\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. 
If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.describe_addresses`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddresses>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Addresses': [ { 'AddressId': 'string', 'Name': 'string', 'Company': 'string', 'Street1': 'string', 'Street2': 'string', 'Street3': 'string', 'City': 'string', 'StateOrProvince': 'string', 'PrefectureOrDistrict': 'string', 'Landmark': 'string', 'Country': 'string', 'PostalCode': 'string', 'PhoneNumber': 'string', 'IsRestricted': True|False }, ], } **Response Structure** - *(dict) --* - **Addresses** *(list) --* The Snowball shipping addresses that were created for this account. - *(dict) --* The address that you want the Snowball or Snowballs associated with a specific job to be shipped to. Addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. Although no individual elements of the ``Address`` are required, if the address is invalid or unsupported, then an exception is thrown. - **AddressId** *(string) --* The unique ID for an address. - **Name** *(string) --* The name of a person to receive a Snowball at an address. - **Company** *(string) --* The name of the company to receive a Snowball at an address. - **Street1** *(string) --* The first line in a street address that a Snowball is to be delivered to. - **Street2** *(string) --* The second line in a street address that a Snowball is to be delivered to. - **Street3** *(string) --* The third line in a street address that a Snowball is to be delivered to. - **City** *(string) --* The city in an address that a Snowball is to be delivered to. - **StateOrProvince** *(string) --* The state or province in an address that a Snowball is to be delivered to. 
- **PrefectureOrDistrict** *(string) --* This field is no longer used and the value is ignored. - **Landmark** *(string) --* This field is no longer used and the value is ignored. - **Country** *(string) --* The country in an address that a Snowball is to be delivered to. - **PostalCode** *(string) --* The postal code in an address that a Snowball is to be delivered to. - **PhoneNumber** *(string) --* The phone number associated with an address that a Snowball is to be delivered to. - **IsRestricted** *(boolean) --* If the address you are creating is a primary address, then set this option to true. This field is not supported in most regions. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:
boto3_type_annotations_with_docs/boto3_type_annotations/snowball/paginator.py
paginate
ybastide/boto3_type_annotations
119
python
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.describe_addresses`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddresses>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'Addresses': [\n {\n 'AddressId': 'string',\n 'Name': 'string',\n 'Company': 'string',\n 'Street1': 'string',\n 'Street2': 'string',\n 'Street3': 'string',\n 'City': 'string',\n 'StateOrProvince': 'string',\n 'PrefectureOrDistrict': 'string',\n 'Landmark': 'string',\n 'Country': 'string',\n 'PostalCode': 'string',\n 'PhoneNumber': 'string',\n 'IsRestricted': True|False\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **Addresses** *(list) --* \n The Snowball shipping addresses that were created for this account.\n - *(dict) --* \n The address that you want the Snowball or Snowballs associated with a specific job to be shipped to. Addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. 
Although no individual elements of the ``Address`` are required, if the address is invalid or unsupported, then an exception is thrown.\n - **AddressId** *(string) --* \n The unique ID for an address.\n - **Name** *(string) --* \n The name of a person to receive a Snowball at an address.\n - **Company** *(string) --* \n The name of the company to receive a Snowball at an address.\n - **Street1** *(string) --* \n The first line in a street address that a Snowball is to be delivered to.\n - **Street2** *(string) --* \n The second line in a street address that a Snowball is to be delivered to.\n - **Street3** *(string) --* \n The third line in a street address that a Snowball is to be delivered to.\n - **City** *(string) --* \n The city in an address that a Snowball is to be delivered to.\n - **StateOrProvince** *(string) --* \n The state or province in an address that a Snowball is to be delivered to.\n - **PrefectureOrDistrict** *(string) --* \n This field is no longer used and the value is ignored.\n - **Landmark** *(string) --* \n This field is no longer used and the value is ignored.\n - **Country** *(string) --* \n The country in an address that a Snowball is to be delivered to.\n - **PostalCode** *(string) --* \n The postal code in an address that a Snowball is to be delivered to.\n - **PhoneNumber** *(string) --* \n The phone number associated with an address that a Snowball is to be delivered to.\n - **IsRestricted** *(boolean) --* \n If the address you are creating is a primary address, then set this option to true. This field is not supported in most regions.\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. 
If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.describe_addresses`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddresses>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'Addresses': [\n {\n 'AddressId': 'string',\n 'Name': 'string',\n 'Company': 'string',\n 'Street1': 'string',\n 'Street2': 'string',\n 'Street3': 'string',\n 'City': 'string',\n 'StateOrProvince': 'string',\n 'PrefectureOrDistrict': 'string',\n 'Landmark': 'string',\n 'Country': 'string',\n 'PostalCode': 'string',\n 'PhoneNumber': 'string',\n 'IsRestricted': True|False\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **Addresses** *(list) --* \n The Snowball shipping addresses that were created for this account.\n - *(dict) --* \n The address that you want the Snowball or Snowballs associated with a specific job to be shipped to. Addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. 
Although no individual elements of the ``Address`` are required, if the address is invalid or unsupported, then an exception is thrown.\n - **AddressId** *(string) --* \n The unique ID for an address.\n - **Name** *(string) --* \n The name of a person to receive a Snowball at an address.\n - **Company** *(string) --* \n The name of the company to receive a Snowball at an address.\n - **Street1** *(string) --* \n The first line in a street address that a Snowball is to be delivered to.\n - **Street2** *(string) --* \n The second line in a street address that a Snowball is to be delivered to.\n - **Street3** *(string) --* \n The third line in a street address that a Snowball is to be delivered to.\n - **City** *(string) --* \n The city in an address that a Snowball is to be delivered to.\n - **StateOrProvince** *(string) --* \n The state or province in an address that a Snowball is to be delivered to.\n - **PrefectureOrDistrict** *(string) --* \n This field is no longer used and the value is ignored.\n - **Landmark** *(string) --* \n This field is no longer used and the value is ignored.\n - **Country** *(string) --* \n The country in an address that a Snowball is to be delivered to.\n - **PostalCode** *(string) --* \n The postal code in an address that a Snowball is to be delivered to.\n - **PhoneNumber** *(string) --* \n The phone number associated with an address that a Snowball is to be delivered to.\n - **IsRestricted** *(boolean) --* \n If the address you are creating is a primary address, then set this option to true. This field is not supported in most regions.\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. 
If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass<|docstring|>Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.describe_addresses`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddresses>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Addresses': [ { 'AddressId': 'string', 'Name': 'string', 'Company': 'string', 'Street1': 'string', 'Street2': 'string', 'Street3': 'string', 'City': 'string', 'StateOrProvince': 'string', 'PrefectureOrDistrict': 'string', 'Landmark': 'string', 'Country': 'string', 'PostalCode': 'string', 'PhoneNumber': 'string', 'IsRestricted': True|False }, ], } **Response Structure** - *(dict) --* - **Addresses** *(list) --* The Snowball shipping addresses that were created for this account. - *(dict) --* The address that you want the Snowball or Snowballs associated with a specific job to be shipped to. Addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. Although no individual elements of the ``Address`` are required, if the address is invalid or unsupported, then an exception is thrown. - **AddressId** *(string) --* The unique ID for an address. - **Name** *(string) --* The name of a person to receive a Snowball at an address. - **Company** *(string) --* The name of the company to receive a Snowball at an address. 
- **Street1** *(string) --* The first line in a street address that a Snowball is to be delivered to. - **Street2** *(string) --* The second line in a street address that a Snowball is to be delivered to. - **Street3** *(string) --* The third line in a street address that a Snowball is to be delivered to. - **City** *(string) --* The city in an address that a Snowball is to be delivered to. - **StateOrProvince** *(string) --* The state or province in an address that a Snowball is to be delivered to. - **PrefectureOrDistrict** *(string) --* This field is no longer used and the value is ignored. - **Landmark** *(string) --* This field is no longer used and the value is ignored. - **Country** *(string) --* The country in an address that a Snowball is to be delivered to. - **PostalCode** *(string) --* The postal code in an address that a Snowball is to be delivered to. - **PhoneNumber** *(string) --* The phone number associated with an address that a Snowball is to be delivered to. - **IsRestricted** *(boolean) --* If the address you are creating is a primary address, then set this option to true. This field is not supported in most regions. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:<|endoftext|>
15b5f2ec2ebfac1511fa60e16038be6809ccdfe2c3eb536dd823f0d126ce18e7
def paginate(self, ClusterId: str, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_cluster_jobs`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusterJobs>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n ClusterId='string',\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'JobListEntries': [\n {\n 'JobId': 'string',\n 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',\n 'IsMaster': True|False,\n 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE',\n 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **JobListEntries** *(list) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. \n - *(dict) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job.\n - **JobId** *(string) --* \n The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` .\n - **JobState** *(string) --* \n The current state of this job.\n - **IsMaster** *(boolean) --* \n A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. 
It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created.\n - **JobType** *(string) --* \n The type of job.\n - **SnowballType** *(string) --* \n The type of device used with this job.\n - **CreationDate** *(datetime) --* \n The creation date for this job.\n - **Description** *(string) --* \n The optional description of this specific job, for example ``Important Photos 2016-08-11`` .\n :type ClusterId: string\n :param ClusterId: **[REQUIRED]**\n The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_cluster_jobs`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusterJobs>`_ **Request Syntax** :: response_iterator = paginator.paginate( ClusterId='string', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'JobListEntries': [ { 'JobId': 'string', 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending', 'IsMaster': True|False, 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE', 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG', 'CreationDate': datetime(2015, 1, 1), 'Description': 'string' }, ], } **Response Structure** - *(dict) --* - **JobListEntries** *(list) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. - *(dict) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job. - **JobId** *(string) --* The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` . - **JobState** *(string) --* The current state of this job. - **IsMaster** *(boolean) --* A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created. - **JobType** *(string) --* The type of job. 
- **SnowballType** *(string) --* The type of device used with this job. - **CreationDate** *(datetime) --* The creation date for this job. - **Description** *(string) --* The optional description of this specific job, for example ``Important Photos 2016-08-11`` . :type ClusterId: string :param ClusterId: **[REQUIRED]** The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` . :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:
boto3_type_annotations_with_docs/boto3_type_annotations/snowball/paginator.py
paginate
ybastide/boto3_type_annotations
119
python
def paginate(self, ClusterId: str, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_cluster_jobs`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusterJobs>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n ClusterId='string',\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'JobListEntries': [\n {\n 'JobId': 'string',\n 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',\n 'IsMaster': True|False,\n 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE',\n 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **JobListEntries** *(list) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. \n - *(dict) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job.\n - **JobId** *(string) --* \n The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` .\n - **JobState** *(string) --* \n The current state of this job.\n - **IsMaster** *(boolean) --* \n A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. 
It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created.\n - **JobType** *(string) --* \n The type of job.\n - **SnowballType** *(string) --* \n The type of device used with this job.\n - **CreationDate** *(datetime) --* \n The creation date for this job.\n - **Description** *(string) --* \n The optional description of this specific job, for example ``Important Photos 2016-08-11`` .\n :type ClusterId: string\n :param ClusterId: **[REQUIRED]**\n The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
def paginate(self, ClusterId: str, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_cluster_jobs`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusterJobs>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n ClusterId='string',\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'JobListEntries': [\n {\n 'JobId': 'string',\n 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',\n 'IsMaster': True|False,\n 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE',\n 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **JobListEntries** *(list) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. \n - *(dict) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job.\n - **JobId** *(string) --* \n The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` .\n - **JobState** *(string) --* \n The current state of this job.\n - **IsMaster** *(boolean) --* \n A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. 
It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created.\n - **JobType** *(string) --* \n The type of job.\n - **SnowballType** *(string) --* \n The type of device used with this job.\n - **CreationDate** *(datetime) --* \n The creation date for this job.\n - **Description** *(string) --* \n The optional description of this specific job, for example ``Important Photos 2016-08-11`` .\n :type ClusterId: string\n :param ClusterId: **[REQUIRED]**\n The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass<|docstring|>Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_cluster_jobs`. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusterJobs>`_ **Request Syntax** :: response_iterator = paginator.paginate( ClusterId='string', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'JobListEntries': [ { 'JobId': 'string', 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending', 'IsMaster': True|False, 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE', 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG', 'CreationDate': datetime(2015, 1, 1), 'Description': 'string' }, ], } **Response Structure** - *(dict) --* - **JobListEntries** *(list) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. - *(dict) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job. - **JobId** *(string) --* The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` . - **JobState** *(string) --* The current state of this job. - **IsMaster** *(boolean) --* A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created. - **JobType** *(string) --* The type of job. - **SnowballType** *(string) --* The type of device used with this job. - **CreationDate** *(datetime) --* The creation date for this job. 
- **Description** *(string) --* The optional description of this specific job, for example ``Important Photos 2016-08-11`` . :type ClusterId: string :param ClusterId: **[REQUIRED]** The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` . :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:<|endoftext|>
4caa0914f67a0870a14ede253d2e12a8015deb52643461b83017cd2f333627c6
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_clusters`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusters>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'ClusterListEntries': [\n {\n 'ClusterId': 'string',\n 'ClusterState': 'AwaitingQuorum'|'Pending'|'InUse'|'Complete'|'Cancelled',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **ClusterListEntries** *(list) --* \n Each ``ClusterListEntry`` object contains a cluster's state, a cluster's ID, and other important status information.\n - *(dict) --* \n Contains a cluster's state, a cluster's ID, and other important information.\n - **ClusterId** *(string) --* \n The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` .\n - **ClusterState** *(string) --* \n The current state of this cluster. For information about the state of a specific node, see JobListEntry$JobState .\n - **CreationDate** *(datetime) --* \n The creation date for this cluster.\n - **Description** *(string) --* \n Defines an optional description of the cluster, for example ``Environmental Data Cluster-01`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. 
If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_clusters`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusters>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'ClusterListEntries': [ { 'ClusterId': 'string', 'ClusterState': 'AwaitingQuorum'|'Pending'|'InUse'|'Complete'|'Cancelled', 'CreationDate': datetime(2015, 1, 1), 'Description': 'string' }, ], } **Response Structure** - *(dict) --* - **ClusterListEntries** *(list) --* Each ``ClusterListEntry`` object contains a cluster's state, a cluster's ID, and other important status information. - *(dict) --* Contains a cluster's state, a cluster's ID, and other important information. - **ClusterId** *(string) --* The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` . - **ClusterState** *(string) --* The current state of this cluster. For information about the state of a specific node, see JobListEntry$JobState . - **CreationDate** *(datetime) --* The creation date for this cluster. - **Description** *(string) --* Defines an optional description of the cluster, for example ``Environmental Data Cluster-01`` . :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:
boto3_type_annotations_with_docs/boto3_type_annotations/snowball/paginator.py
paginate
ybastide/boto3_type_annotations
119
python
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_clusters`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusters>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'ClusterListEntries': [\n {\n 'ClusterId': 'string',\n 'ClusterState': 'AwaitingQuorum'|'Pending'|'InUse'|'Complete'|'Cancelled',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **ClusterListEntries** *(list) --* \n Each ``ClusterListEntry`` object contains a cluster's state, a cluster's ID, and other important status information.\n - *(dict) --* \n Contains a cluster's state, a cluster's ID, and other important information.\n - **ClusterId** *(string) --* \n The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` .\n - **ClusterState** *(string) --* \n The current state of this cluster. For information about the state of a specific node, see JobListEntry$JobState .\n - **CreationDate** *(datetime) --* \n The creation date for this cluster.\n - **Description** *(string) --* \n Defines an optional description of the cluster, for example ``Environmental Data Cluster-01`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. 
If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_clusters`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusters>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'ClusterListEntries': [\n {\n 'ClusterId': 'string',\n 'ClusterState': 'AwaitingQuorum'|'Pending'|'InUse'|'Complete'|'Cancelled',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **ClusterListEntries** *(list) --* \n Each ``ClusterListEntry`` object contains a cluster's state, a cluster's ID, and other important status information.\n - *(dict) --* \n Contains a cluster's state, a cluster's ID, and other important information.\n - **ClusterId** *(string) --* \n The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` .\n - **ClusterState** *(string) --* \n The current state of this cluster. For information about the state of a specific node, see JobListEntry$JobState .\n - **CreationDate** *(datetime) --* \n The creation date for this cluster.\n - **Description** *(string) --* \n Defines an optional description of the cluster, for example ``Environmental Data Cluster-01`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. 
If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass<|docstring|>Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_clusters`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusters>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'ClusterListEntries': [ { 'ClusterId': 'string', 'ClusterState': 'AwaitingQuorum'|'Pending'|'InUse'|'Complete'|'Cancelled', 'CreationDate': datetime(2015, 1, 1), 'Description': 'string' }, ], } **Response Structure** - *(dict) --* - **ClusterListEntries** *(list) --* Each ``ClusterListEntry`` object contains a cluster's state, a cluster's ID, and other important status information. - *(dict) --* Contains a cluster's state, a cluster's ID, and other important information. - **ClusterId** *(string) --* The 39-character ID for the cluster that you want to list, for example ``CID123e4567-e89b-12d3-a456-426655440000`` . - **ClusterState** *(string) --* The current state of this cluster. For information about the state of a specific node, see JobListEntry$JobState . - **CreationDate** *(datetime) --* The creation date for this cluster. - **Description** *(string) --* Defines an optional description of the cluster, for example ``Environmental Data Cluster-01`` . :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. 
If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:<|endoftext|>
4bfba236766f5565d5ab92d221a4956c6a8d1c705c9f6757e118b97e71c39613
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_compatible_images`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListCompatibleImages>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'CompatibleImages': [\n {\n 'AmiId': 'string',\n 'Name': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **CompatibleImages** *(list) --* \n A JSON-formatted object that describes a compatible AMI.\n - *(dict) --* \n A JSON-formatted object that describes a compatible Amazon Machine Image (AMI). For more information on compatible AMIs, see `Using Amazon EC2 Compute Instances <http://docs.aws.amazon.com/snowball/latest/developer-guide/using-ec2.html>`__ in the *AWS Snowball Developer Guide* .\n - **AmiId** *(string) --* \n The unique identifier for an individual Snowball Edge AMI.\n - **Name** *(string) --* \n The optional name of a compatible image.\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_compatible_images`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListCompatibleImages>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'CompatibleImages': [ { 'AmiId': 'string', 'Name': 'string' }, ], } **Response Structure** - *(dict) --* - **CompatibleImages** *(list) --* A JSON-formatted object that describes a compatible AMI. - *(dict) --* A JSON-formatted object that describes a compatible Amazon Machine Image (AMI). For more information on compatible AMIs, see `Using Amazon EC2 Compute Instances <http://docs.aws.amazon.com/snowball/latest/developer-guide/using-ec2.html>`__ in the *AWS Snowball Developer Guide* . - **AmiId** *(string) --* The unique identifier for an individual Snowball Edge AMI. - **Name** *(string) --* The optional name of a compatible image. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:
boto3_type_annotations_with_docs/boto3_type_annotations/snowball/paginator.py
paginate
ybastide/boto3_type_annotations
119
python
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_compatible_images`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListCompatibleImages>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'CompatibleImages': [\n {\n 'AmiId': 'string',\n 'Name': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **CompatibleImages** *(list) --* \n A JSON-formatted object that describes a compatible AMI.\n - *(dict) --* \n A JSON-formatted object that describes a compatible Amazon Machine Image (AMI). For more information on compatible AMIs, see `Using Amazon EC2 Compute Instances <http://docs.aws.amazon.com/snowball/latest/developer-guide/using-ec2.html>`__ in the *AWS Snowball Developer Guide* .\n - **AmiId** *(string) --* \n The unique identifier for an individual Snowball Edge AMI.\n - **Name** *(string) --* \n The optional name of a compatible image.\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_compatible_images`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListCompatibleImages>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'CompatibleImages': [\n {\n 'AmiId': 'string',\n 'Name': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **CompatibleImages** *(list) --* \n A JSON-formatted object that describes a compatible AMI.\n - *(dict) --* \n A JSON-formatted object that describes a compatible Amazon Machine Image (AMI). For more information on compatible AMIs, see `Using Amazon EC2 Compute Instances <http://docs.aws.amazon.com/snowball/latest/developer-guide/using-ec2.html>`__ in the *AWS Snowball Developer Guide* .\n - **AmiId** *(string) --* \n The unique identifier for an individual Snowball Edge AMI.\n - **Name** *(string) --* \n The optional name of a compatible image.\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass<|docstring|>Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_compatible_images`. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListCompatibleImages>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'CompatibleImages': [ { 'AmiId': 'string', 'Name': 'string' }, ], } **Response Structure** - *(dict) --* - **CompatibleImages** *(list) --* A JSON-formatted object that describes a compatible AMI. - *(dict) --* A JSON-formatted object that describes a compatible Amazon Machine Image (AMI). For more information on compatible AMIs, see `Using Amazon EC2 Compute Instances <http://docs.aws.amazon.com/snowball/latest/developer-guide/using-ec2.html>`__ in the *AWS Snowball Developer Guide* . - **AmiId** *(string) --* The unique identifier for an individual Snowball Edge AMI. - **Name** *(string) --* The optional name of a compatible image. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:<|endoftext|>
fa966a47d64edefe045d1d16fdaecd124a15cfedeace6ef7185df66a7d2133cb
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_jobs`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListJobs>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'JobListEntries': [\n {\n 'JobId': 'string',\n 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',\n 'IsMaster': True|False,\n 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE',\n 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **JobListEntries** *(list) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. \n - *(dict) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job.\n - **JobId** *(string) --* \n The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` .\n - **JobState** *(string) --* \n The current state of this job.\n - **IsMaster** *(boolean) --* \n A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. 
It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created.\n - **JobType** *(string) --* \n The type of job.\n - **SnowballType** *(string) --* \n The type of device used with this job.\n - **CreationDate** *(datetime) --* \n The creation date for this job.\n - **Description** *(string) --* \n The optional description of this specific job, for example ``Important Photos 2016-08-11`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_jobs`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListJobs>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'JobListEntries': [ { 'JobId': 'string', 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending', 'IsMaster': True|False, 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE', 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG', 'CreationDate': datetime(2015, 1, 1), 'Description': 'string' }, ], } **Response Structure** - *(dict) --* - **JobListEntries** *(list) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. - *(dict) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job. - **JobId** *(string) --* The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` . - **JobState** *(string) --* The current state of this job. - **IsMaster** *(boolean) --* A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created. - **JobType** *(string) --* The type of job. - **SnowballType** *(string) --* The type of device used with this job. 
- **CreationDate** *(datetime) --* The creation date for this job. - **Description** *(string) --* The optional description of this specific job, for example ``Important Photos 2016-08-11`` . :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:
boto3_type_annotations_with_docs/boto3_type_annotations/snowball/paginator.py
paginate
ybastide/boto3_type_annotations
119
python
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_jobs`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListJobs>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'JobListEntries': [\n {\n 'JobId': 'string',\n 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',\n 'IsMaster': True|False,\n 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE',\n 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **JobListEntries** *(list) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. \n - *(dict) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job.\n - **JobId** *(string) --* \n The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` .\n - **JobState** *(string) --* \n The current state of this job.\n - **IsMaster** *(boolean) --* \n A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. 
It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created.\n - **JobType** *(string) --* \n The type of job.\n - **SnowballType** *(string) --* \n The type of device used with this job.\n - **CreationDate** *(datetime) --* \n The creation date for this job.\n - **Description** *(string) --* \n The optional description of this specific job, for example ``Important Photos 2016-08-11`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass
def paginate(self, PaginationConfig: Dict=None) -> Dict: "\n Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_jobs`.\n See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListJobs>`_\n \n **Request Syntax**\n ::\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'MaxItems': 123,\n 'PageSize': 123,\n 'StartingToken': 'string'\n }\n )\n \n **Response Syntax**\n ::\n {\n 'JobListEntries': [\n {\n 'JobId': 'string',\n 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',\n 'IsMaster': True|False,\n 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE',\n 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG',\n 'CreationDate': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ],\n }\n \n **Response Structure**\n - *(dict) --* \n - **JobListEntries** *(list) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. \n - *(dict) --* \n Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job.\n - **JobId** *(string) --* \n The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` .\n - **JobState** *(string) --* \n The current state of this job.\n - **IsMaster** *(boolean) --* \n A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. 
It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created.\n - **JobType** *(string) --* \n The type of job.\n - **SnowballType** *(string) --* \n The type of device used with this job.\n - **CreationDate** *(datetime) --* \n The creation date for this job.\n - **Description** *(string) --* \n The optional description of this specific job, for example ``Important Photos 2016-08-11`` .\n :type PaginationConfig: dict\n :param PaginationConfig:\n A dictionary that provides parameters to control pagination.\n - **MaxItems** *(integer) --*\n The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.\n - **PageSize** *(integer) --*\n The size of each page.\n - **StartingToken** *(string) --*\n A token to specify where to start paginating. This is the ``NextToken`` from a previous response.\n :rtype: dict\n :returns:\n " pass<|docstring|>Creates an iterator that will paginate through responses from :py:meth:`Snowball.Client.list_jobs`. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListJobs>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'JobListEntries': [ { 'JobId': 'string', 'JobState': 'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWSSortingFacility'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending', 'IsMaster': True|False, 'JobType': 'IMPORT'|'EXPORT'|'LOCAL_USE', 'SnowballType': 'STANDARD'|'EDGE'|'EDGE_C'|'EDGE_CG', 'CreationDate': datetime(2015, 1, 1), 'Description': 'string' }, ], } **Response Structure** - *(dict) --* - **JobListEntries** *(list) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of export jobs. - *(dict) --* Each ``JobListEntry`` object contains a job's state, a job's ID, and a value that indicates whether the job is a job part, in the case of an export job. - **JobId** *(string) --* The automatically generated ID for a job, for example ``JID123e4567-e89b-12d3-a456-426655440000`` . - **JobState** *(string) --* The current state of this job. - **IsMaster** *(boolean) --* A value that indicates that this job is a master job. A master job represents a successful request to create an export job. Master jobs aren't associated with any Snowballs. Instead, each master job will have at least one job part, and each job part is associated with a Snowball. It might take some time before the job parts associated with a particular master job are listed, because they are created after the master job is created. - **JobType** *(string) --* The type of job. - **SnowballType** *(string) --* The type of device used with this job. - **CreationDate** *(datetime) --* The creation date for this job. 
- **Description** *(string) --* The optional description of this specific job, for example ``Important Photos 2016-08-11`` . :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:<|endoftext|>
bdce3aae62fa855c1f3fe6280af1aab1d54c53af4e85fa39173ca4316827d991
def report(self) -> None: '\n Print a summary report about self.graph\n ' log.info('Total nodes in {}: {}'.format((self.graph.name or 'graph'), len(self.graph.nodes()))) log.info('Total edges in {}: {}'.format((self.graph.name or 'graph'), len(self.graph.edges())))
Print a summary report about self.graph
kgx/transformers/transformer.py
report
stevencox/kgx
0
python
def report(self) -> None: '\n \n ' log.info('Total nodes in {}: {}'.format((self.graph.name or 'graph'), len(self.graph.nodes()))) log.info('Total edges in {}: {}'.format((self.graph.name or 'graph'), len(self.graph.edges())))
def report(self) -> None: '\n \n ' log.info('Total nodes in {}: {}'.format((self.graph.name or 'graph'), len(self.graph.nodes()))) log.info('Total edges in {}: {}'.format((self.graph.name or 'graph'), len(self.graph.edges())))<|docstring|>Print a summary report about self.graph<|endoftext|>
f7c98250692cba67c835385c89de398d6d8cf0a968116422d9e3217188d162c5
def is_empty(self) -> bool: '\n Check whether self.graph is empty.\n\n Returns\n -------\n bool\n A boolean value asserting whether the graph is empty or not\n\n ' return ((len(self.graph.nodes()) == 0) and (len(self.graph.edges()) == 0))
Check whether self.graph is empty. Returns ------- bool A boolean value asserting whether the graph is empty or not
kgx/transformers/transformer.py
is_empty
stevencox/kgx
0
python
def is_empty(self) -> bool: '\n Check whether self.graph is empty.\n\n Returns\n -------\n bool\n A boolean value asserting whether the graph is empty or not\n\n ' return ((len(self.graph.nodes()) == 0) and (len(self.graph.edges()) == 0))
def is_empty(self) -> bool: '\n Check whether self.graph is empty.\n\n Returns\n -------\n bool\n A boolean value asserting whether the graph is empty or not\n\n ' return ((len(self.graph.nodes()) == 0) and (len(self.graph.edges()) == 0))<|docstring|>Check whether self.graph is empty. Returns ------- bool A boolean value asserting whether the graph is empty or not<|endoftext|>
1a353784ac4276ebe674da9505ae073e6e8684a5cccb0d7ec92db8627c1e253c
def set_node_filter(self, key: str, value: Union[(str, set)]) -> None: "\n Set a node filter, as defined by a key and value pair.\n These filters are used to create a subgraph or reduce the\n search space when fetching nodes from a source.\n\n .. note::\n When defining the 'category' filter, the value should be of type ``set``.\n This method also sets the 'subject_category' and 'object_category'\n edge filters, to get a consistent set of nodes in the subgraph.\n\n Parameters\n ----------\n key: str\n The key for node filter\n value: Union[str, set]\n The value for the node filter. Can be either a string or a set.\n\n " if (key == 'category'): if isinstance(value, set): if ('subject_category' in self.edge_filters): self.edge_filters['subject_category'].update(value) else: self.edge_filters['subject_category'] = value if ('object_category' in self.edge_filters): self.edge_filters['object_category'].update(value) else: self.edge_filters['object_category'] = value else: raise TypeError("'category' node filter should have a value of type 'set'") if (key in self.node_filters): self.node_filters[key].update(value) else: self.node_filters[key] = value
Set a node filter, as defined by a key and value pair. These filters are used to create a subgraph or reduce the search space when fetching nodes from a source. .. note:: When defining the 'category' filter, the value should be of type ``set``. This method also sets the 'subject_category' and 'object_category' edge filters, to get a consistent set of nodes in the subgraph. Parameters ---------- key: str The key for node filter value: Union[str, set] The value for the node filter. Can be either a string or a set.
kgx/transformers/transformer.py
set_node_filter
stevencox/kgx
0
python
def set_node_filter(self, key: str, value: Union[(str, set)]) -> None: "\n Set a node filter, as defined by a key and value pair.\n These filters are used to create a subgraph or reduce the\n search space when fetching nodes from a source.\n\n .. note::\n When defining the 'category' filter, the value should be of type ``set``.\n This method also sets the 'subject_category' and 'object_category'\n edge filters, to get a consistent set of nodes in the subgraph.\n\n Parameters\n ----------\n key: str\n The key for node filter\n value: Union[str, set]\n The value for the node filter. Can be either a string or a set.\n\n " if (key == 'category'): if isinstance(value, set): if ('subject_category' in self.edge_filters): self.edge_filters['subject_category'].update(value) else: self.edge_filters['subject_category'] = value if ('object_category' in self.edge_filters): self.edge_filters['object_category'].update(value) else: self.edge_filters['object_category'] = value else: raise TypeError("'category' node filter should have a value of type 'set'") if (key in self.node_filters): self.node_filters[key].update(value) else: self.node_filters[key] = value
def set_node_filter(self, key: str, value: Union[(str, set)]) -> None: "\n Set a node filter, as defined by a key and value pair.\n These filters are used to create a subgraph or reduce the\n search space when fetching nodes from a source.\n\n .. note::\n When defining the 'category' filter, the value should be of type ``set``.\n This method also sets the 'subject_category' and 'object_category'\n edge filters, to get a consistent set of nodes in the subgraph.\n\n Parameters\n ----------\n key: str\n The key for node filter\n value: Union[str, set]\n The value for the node filter. Can be either a string or a set.\n\n " if (key == 'category'): if isinstance(value, set): if ('subject_category' in self.edge_filters): self.edge_filters['subject_category'].update(value) else: self.edge_filters['subject_category'] = value if ('object_category' in self.edge_filters): self.edge_filters['object_category'].update(value) else: self.edge_filters['object_category'] = value else: raise TypeError("'category' node filter should have a value of type 'set'") if (key in self.node_filters): self.node_filters[key].update(value) else: self.node_filters[key] = value<|docstring|>Set a node filter, as defined by a key and value pair. These filters are used to create a subgraph or reduce the search space when fetching nodes from a source. .. note:: When defining the 'category' filter, the value should be of type ``set``. This method also sets the 'subject_category' and 'object_category' edge filters, to get a consistent set of nodes in the subgraph. Parameters ---------- key: str The key for node filter value: Union[str, set] The value for the node filter. Can be either a string or a set.<|endoftext|>
37b148d5981923da2c29dbbfefd33cebee26bf4dbc65f6e5f6923922eba7da5e
def set_edge_filter(self, key: str, value: set) -> None: "\n Set an edge filter, as defined by a key and value pair.\n These filters are used to create a subgraph or reduce the\n search space when fetching edges from a source.\n\n .. note::\n When defining the 'subject_category' or 'object_category' filter,\n the value should be of type ``set``.\n This method also sets the 'category' node filter, to get a\n consistent set of nodes in the subgraph.\n\n Parameters\n ----------\n key: str\n The key for edge filter\n value: Union[str, set]\n The value for the edge filter. Can be either a string or a set.\n\n " if (key in {'subject_category', 'object_category'}): if isinstance(value, set): if ('category' in self.node_filters): self.node_filters['category'].update(value) else: self.node_filters['category'] = value else: raise TypeError(f"'{key}' edge filter should have a value of type 'set'") if (key in self.edge_filters): self.edge_filters[key].update(value) else: self.edge_filters[key] = value
Set an edge filter, as defined by a key and value pair. These filters are used to create a subgraph or reduce the search space when fetching edges from a source. .. note:: When defining the 'subject_category' or 'object_category' filter, the value should be of type ``set``. This method also sets the 'category' node filter, to get a consistent set of nodes in the subgraph. Parameters ---------- key: str The key for edge filter value: Union[str, set] The value for the edge filter. Can be either a string or a set.
kgx/transformers/transformer.py
set_edge_filter
stevencox/kgx
0
python
def set_edge_filter(self, key: str, value: set) -> None: "\n Set an edge filter, as defined by a key and value pair.\n These filters are used to create a subgraph or reduce the\n search space when fetching edges from a source.\n\n .. note::\n When defining the 'subject_category' or 'object_category' filter,\n the value should be of type ``set``.\n This method also sets the 'category' node filter, to get a\n consistent set of nodes in the subgraph.\n\n Parameters\n ----------\n key: str\n The key for edge filter\n value: Union[str, set]\n The value for the edge filter. Can be either a string or a set.\n\n " if (key in {'subject_category', 'object_category'}): if isinstance(value, set): if ('category' in self.node_filters): self.node_filters['category'].update(value) else: self.node_filters['category'] = value else: raise TypeError(f"'{key}' edge filter should have a value of type 'set'") if (key in self.edge_filters): self.edge_filters[key].update(value) else: self.edge_filters[key] = value
def set_edge_filter(self, key: str, value: set) -> None: "\n Set an edge filter, as defined by a key and value pair.\n These filters are used to create a subgraph or reduce the\n search space when fetching edges from a source.\n\n .. note::\n When defining the 'subject_category' or 'object_category' filter,\n the value should be of type ``set``.\n This method also sets the 'category' node filter, to get a\n consistent set of nodes in the subgraph.\n\n Parameters\n ----------\n key: str\n The key for edge filter\n value: Union[str, set]\n The value for the edge filter. Can be either a string or a set.\n\n " if (key in {'subject_category', 'object_category'}): if isinstance(value, set): if ('category' in self.node_filters): self.node_filters['category'].update(value) else: self.node_filters['category'] = value else: raise TypeError(f"'{key}' edge filter should have a value of type 'set'") if (key in self.edge_filters): self.edge_filters[key].update(value) else: self.edge_filters[key] = value<|docstring|>Set an edge filter, as defined by a key and value pair. These filters are used to create a subgraph or reduce the search space when fetching edges from a source. .. note:: When defining the 'subject_category' or 'object_category' filter, the value should be of type ``set``. This method also sets the 'category' node filter, to get a consistent set of nodes in the subgraph. Parameters ---------- key: str The key for edge filter value: Union[str, set] The value for the edge filter. Can be either a string or a set.<|endoftext|>
c8b92d885bc45bf6e300e2b62653972aa76eeeaf1fc4e359cf473415f5d986c1
@staticmethod def serialize(g: nx.MultiDiGraph) -> Dict: '\n Convert networkx.MultiDiGraph as a dictionary.\n\n Parameters\n ----------\n g: networkx.MultiDiGraph\n Graph to convert as a dictionary\n\n Returns\n -------\n dict\n A dictionary\n\n ' data = json_graph.node_link_data(g) return data
Convert networkx.MultiDiGraph as a dictionary. Parameters ---------- g: networkx.MultiDiGraph Graph to convert as a dictionary Returns ------- dict A dictionary
kgx/transformers/transformer.py
serialize
stevencox/kgx
0
python
@staticmethod def serialize(g: nx.MultiDiGraph) -> Dict: '\n Convert networkx.MultiDiGraph as a dictionary.\n\n Parameters\n ----------\n g: networkx.MultiDiGraph\n Graph to convert as a dictionary\n\n Returns\n -------\n dict\n A dictionary\n\n ' data = json_graph.node_link_data(g) return data
@staticmethod def serialize(g: nx.MultiDiGraph) -> Dict: '\n Convert networkx.MultiDiGraph as a dictionary.\n\n Parameters\n ----------\n g: networkx.MultiDiGraph\n Graph to convert as a dictionary\n\n Returns\n -------\n dict\n A dictionary\n\n ' data = json_graph.node_link_data(g) return data<|docstring|>Convert networkx.MultiDiGraph as a dictionary. Parameters ---------- g: networkx.MultiDiGraph Graph to convert as a dictionary Returns ------- dict A dictionary<|endoftext|>
f89754ca11ad97681e35d0f75bf71a0d47d54853b8373a96bf0fe846f48dd9d4
@staticmethod def dump_to_file(g: nx.MultiDiGraph, filename: str) -> None: '\n Serialize networkx.MultiDiGraph as JSON and write to file.\n\n Parameters\n ----------\n g: networkx.MultiDiGraph\n Graph to convert as a dictionary\n filename: str\n File to write the JSON\n\n ' FH = open(filename, 'w') json_data = Transformer.serialize(g) FH.write(json.dumps(json_data)) FH.close()
Serialize networkx.MultiDiGraph as JSON and write to file. Parameters ---------- g: networkx.MultiDiGraph Graph to convert as a dictionary filename: str File to write the JSON
kgx/transformers/transformer.py
dump_to_file
stevencox/kgx
0
python
@staticmethod def dump_to_file(g: nx.MultiDiGraph, filename: str) -> None: '\n Serialize networkx.MultiDiGraph as JSON and write to file.\n\n Parameters\n ----------\n g: networkx.MultiDiGraph\n Graph to convert as a dictionary\n filename: str\n File to write the JSON\n\n ' FH = open(filename, 'w') json_data = Transformer.serialize(g) FH.write(json.dumps(json_data)) FH.close()
@staticmethod def dump_to_file(g: nx.MultiDiGraph, filename: str) -> None: '\n Serialize networkx.MultiDiGraph as JSON and write to file.\n\n Parameters\n ----------\n g: networkx.MultiDiGraph\n Graph to convert as a dictionary\n filename: str\n File to write the JSON\n\n ' FH = open(filename, 'w') json_data = Transformer.serialize(g) FH.write(json.dumps(json_data)) FH.close()<|docstring|>Serialize networkx.MultiDiGraph as JSON and write to file. Parameters ---------- g: networkx.MultiDiGraph Graph to convert as a dictionary filename: str File to write the JSON<|endoftext|>
0621074e1ed14823a229c9f7aeb92d09a7bcf92acf170468b736554f86ad0281
@staticmethod def deserialize(data: Dict) -> nx.MultiDiGraph: '\n Deserialize a networkx.MultiDiGraph from a dictionary.\n\n Parameters\n ----------\n data: dict\n Dictionary containing nodes and edges\n\n Returns\n -------\n networkx.MultiDiGraph\n A networkx.MultiDiGraph representation\n\n ' g = json_graph.node_link_graph(data) return g
Deserialize a networkx.MultiDiGraph from a dictionary. Parameters ---------- data: dict Dictionary containing nodes and edges Returns ------- networkx.MultiDiGraph A networkx.MultiDiGraph representation
kgx/transformers/transformer.py
deserialize
stevencox/kgx
0
python
@staticmethod def deserialize(data: Dict) -> nx.MultiDiGraph: '\n Deserialize a networkx.MultiDiGraph from a dictionary.\n\n Parameters\n ----------\n data: dict\n Dictionary containing nodes and edges\n\n Returns\n -------\n networkx.MultiDiGraph\n A networkx.MultiDiGraph representation\n\n ' g = json_graph.node_link_graph(data) return g
@staticmethod def deserialize(data: Dict) -> nx.MultiDiGraph: '\n Deserialize a networkx.MultiDiGraph from a dictionary.\n\n Parameters\n ----------\n data: dict\n Dictionary containing nodes and edges\n\n Returns\n -------\n networkx.MultiDiGraph\n A networkx.MultiDiGraph representation\n\n ' g = json_graph.node_link_graph(data) return g<|docstring|>Deserialize a networkx.MultiDiGraph from a dictionary. Parameters ---------- data: dict Dictionary containing nodes and edges Returns ------- networkx.MultiDiGraph A networkx.MultiDiGraph representation<|endoftext|>
88ac1b7703448ea7c0ed285b9a742f15dbed559a8df4b55835d9365907b0dc11
@staticmethod def restore_from_file(filename) -> nx.MultiDiGraph: '\n Deserialize a networkx.MultiDiGraph from a JSON file.\n\n Parameters\n ----------\n filename: str\n File to read from\n\n Returns\n -------\n networkx.MultiDiGraph\n A networkx.MultiDiGraph representation\n\n ' FH = open(filename, 'r') data = FH.read() g = Transformer.deserialize(json.loads(data)) return g
Deserialize a networkx.MultiDiGraph from a JSON file. Parameters ---------- filename: str File to read from Returns ------- networkx.MultiDiGraph A networkx.MultiDiGraph representation
kgx/transformers/transformer.py
restore_from_file
stevencox/kgx
0
python
@staticmethod def restore_from_file(filename) -> nx.MultiDiGraph: '\n Deserialize a networkx.MultiDiGraph from a JSON file.\n\n Parameters\n ----------\n filename: str\n File to read from\n\n Returns\n -------\n networkx.MultiDiGraph\n A networkx.MultiDiGraph representation\n\n ' FH = open(filename, 'r') data = FH.read() g = Transformer.deserialize(json.loads(data)) return g
@staticmethod def restore_from_file(filename) -> nx.MultiDiGraph: '\n Deserialize a networkx.MultiDiGraph from a JSON file.\n\n Parameters\n ----------\n filename: str\n File to read from\n\n Returns\n -------\n networkx.MultiDiGraph\n A networkx.MultiDiGraph representation\n\n ' FH = open(filename, 'r') data = FH.read() g = Transformer.deserialize(json.loads(data)) return g<|docstring|>Deserialize a networkx.MultiDiGraph from a JSON file. Parameters ---------- filename: str File to read from Returns ------- networkx.MultiDiGraph A networkx.MultiDiGraph representation<|endoftext|>
2349970a52b08e8af228beda8c586d72d9522355b6ddff3a064404f2cba2d12b
@staticmethod def validate_node(node: dict) -> dict: '\n Given a node as a dictionary, check for required properties.\n This method will return the node dictionary with default assumptions applied, if any.\n\n Parameters\n ----------\n node: dict\n A node represented as a dict\n\n Returns\n -------\n dict\n A node represented as a dict, with default assumptions applied.\n\n ' if (len(node) == 0): log.debug('Empty node encountered: {}'.format(node)) return node if ('id' not in node): raise KeyError("node does not have 'id' property: {}".format(node)) if ('name' not in node): log.debug("node does not have 'name' property: {}".format(node)) if ('category' not in node): log.debug("node does not have 'category' property: {}\nUsing {} as default".format(node, Transformer.DEFAULT_NODE_CATEGORY)) node['category'] = [Transformer.DEFAULT_NODE_CATEGORY] return node
Given a node as a dictionary, check for required properties. This method will return the node dictionary with default assumptions applied, if any. Parameters ---------- node: dict A node represented as a dict Returns ------- dict A node represented as a dict, with default assumptions applied.
kgx/transformers/transformer.py
validate_node
stevencox/kgx
0
python
@staticmethod def validate_node(node: dict) -> dict: '\n Given a node as a dictionary, check for required properties.\n This method will return the node dictionary with default assumptions applied, if any.\n\n Parameters\n ----------\n node: dict\n A node represented as a dict\n\n Returns\n -------\n dict\n A node represented as a dict, with default assumptions applied.\n\n ' if (len(node) == 0): log.debug('Empty node encountered: {}'.format(node)) return node if ('id' not in node): raise KeyError("node does not have 'id' property: {}".format(node)) if ('name' not in node): log.debug("node does not have 'name' property: {}".format(node)) if ('category' not in node): log.debug("node does not have 'category' property: {}\nUsing {} as default".format(node, Transformer.DEFAULT_NODE_CATEGORY)) node['category'] = [Transformer.DEFAULT_NODE_CATEGORY] return node
@staticmethod def validate_node(node: dict) -> dict: '\n Given a node as a dictionary, check for required properties.\n This method will return the node dictionary with default assumptions applied, if any.\n\n Parameters\n ----------\n node: dict\n A node represented as a dict\n\n Returns\n -------\n dict\n A node represented as a dict, with default assumptions applied.\n\n ' if (len(node) == 0): log.debug('Empty node encountered: {}'.format(node)) return node if ('id' not in node): raise KeyError("node does not have 'id' property: {}".format(node)) if ('name' not in node): log.debug("node does not have 'name' property: {}".format(node)) if ('category' not in node): log.debug("node does not have 'category' property: {}\nUsing {} as default".format(node, Transformer.DEFAULT_NODE_CATEGORY)) node['category'] = [Transformer.DEFAULT_NODE_CATEGORY] return node<|docstring|>Given a node as a dictionary, check for required properties. This method will return the node dictionary with default assumptions applied, if any. Parameters ---------- node: dict A node represented as a dict Returns ------- dict A node represented as a dict, with default assumptions applied.<|endoftext|>
e0280523e66b8c6f85699eba3c98a7f06d6f0baed2b9aec9f5de72d258e3a8d0
@staticmethod def validate_edge(edge: dict) -> dict: '\n Given an edge as a dictionary, check for required properties.\n This method will return the edge dictionary with default assumptions applied, if any.\n\n Parameters\n ----------\n edge: dict\n An edge represented as a dict\n\n Returns\n -------\n dict\n An edge represented as a dict, with default assumptions applied.\n ' if ('subject' not in edge): raise KeyError("edge does not have 'subject' property: {}".format(edge)) if ('edge_label' not in edge): raise KeyError("edge does not have 'edge_label' property: {}".format(edge)) if ('object' not in edge): raise KeyError("edge does not have 'object' property: {}".format(edge)) return edge
Given an edge as a dictionary, check for required properties. This method will return the edge dictionary with default assumptions applied, if any. Parameters ---------- edge: dict An edge represented as a dict Returns ------- dict An edge represented as a dict, with default assumptions applied.
kgx/transformers/transformer.py
validate_edge
stevencox/kgx
0
python
@staticmethod def validate_edge(edge: dict) -> dict: '\n Given an edge as a dictionary, check for required properties.\n This method will return the edge dictionary with default assumptions applied, if any.\n\n Parameters\n ----------\n edge: dict\n An edge represented as a dict\n\n Returns\n -------\n dict\n An edge represented as a dict, with default assumptions applied.\n ' if ('subject' not in edge): raise KeyError("edge does not have 'subject' property: {}".format(edge)) if ('edge_label' not in edge): raise KeyError("edge does not have 'edge_label' property: {}".format(edge)) if ('object' not in edge): raise KeyError("edge does not have 'object' property: {}".format(edge)) return edge
@staticmethod def validate_edge(edge: dict) -> dict: '\n Given an edge as a dictionary, check for required properties.\n This method will return the edge dictionary with default assumptions applied, if any.\n\n Parameters\n ----------\n edge: dict\n An edge represented as a dict\n\n Returns\n -------\n dict\n An edge represented as a dict, with default assumptions applied.\n ' if ('subject' not in edge): raise KeyError("edge does not have 'subject' property: {}".format(edge)) if ('edge_label' not in edge): raise KeyError("edge does not have 'edge_label' property: {}".format(edge)) if ('object' not in edge): raise KeyError("edge does not have 'object' property: {}".format(edge)) return edge<|docstring|>Given an edge as a dictionary, check for required properties. This method will return the edge dictionary with default assumptions applied, if any. Parameters ---------- edge: dict An edge represented as a dict Returns ------- dict An edge represented as a dict, with default assumptions applied.<|endoftext|>
1933c7766ae770161f28ef1b510d2d0fc999c2ed8e3353e83f4c46367f4e4f52
def compute_convolution(I_o, T, stride=1): '\n This function takes an image <I> and a template <T> (both numpy arrays) \n and returns a heatmap where each grid represents the output produced by \n convolution at each location. You can add optional parameters (e.g. stride, \n window_size, padding) to create additional functionality. \n ' (n_rows, n_cols, n_channels) = np.shape(I_o) (n_rows_T, n_cols_T, n_channels_T) = np.shape(T) nrTh = int(((n_rows_T - 1) / 2)) ncTh = int(((n_cols_T - 1) / 2)) I = np.pad(I_o, ((nrTh, nrTh), (ncTh, ncTh), (0, 0)), 'constant') heatmap_rgb = np.zeros((n_rows, n_cols, n_channels)) T_n = np.zeros((n_rows_T, n_cols_T, n_channels_T)) for ch in range(n_channels_T): norm_T = np.linalg.norm(T[(:, :, ch)]) T_n[(:, :, ch)] = (T[(:, :, ch)] / norm_T) for i in range(n_rows): for j in range(n_cols): for ch in range(n_channels): norm_I = np.linalg.norm(I[(i:(i + n_rows_T), j:(j + n_cols_T), ch)]) I_cropped_n = (I[(i:(i + n_rows_T), j:(j + n_cols_T), ch)] / norm_I) heatmap_rgb[i][j][ch] = np.sum((T_n[(:, :, ch)] * I_cropped_n)) heatmap = (((0.8 * heatmap_rgb[(:, :, 0)]) + (0.1 * heatmap_rgb[(:, :, 1)])) + (0.1 * heatmap_rgb[(:, :, 2)])) return heatmap
This function takes an image <I> and a template <T> (both numpy arrays) and returns a heatmap where each grid represents the output produced by convolution at each location. You can add optional parameters (e.g. stride, window_size, padding) to create additional functionality.
run_predictions.py
compute_convolution
MingshuLiang/caltech-ee148-spring2020-hw02
0
python
def compute_convolution(I_o, T, stride=1): '\n This function takes an image <I> and a template <T> (both numpy arrays) \n and returns a heatmap where each grid represents the output produced by \n convolution at each location. You can add optional parameters (e.g. stride, \n window_size, padding) to create additional functionality. \n ' (n_rows, n_cols, n_channels) = np.shape(I_o) (n_rows_T, n_cols_T, n_channels_T) = np.shape(T) nrTh = int(((n_rows_T - 1) / 2)) ncTh = int(((n_cols_T - 1) / 2)) I = np.pad(I_o, ((nrTh, nrTh), (ncTh, ncTh), (0, 0)), 'constant') heatmap_rgb = np.zeros((n_rows, n_cols, n_channels)) T_n = np.zeros((n_rows_T, n_cols_T, n_channels_T)) for ch in range(n_channels_T): norm_T = np.linalg.norm(T[(:, :, ch)]) T_n[(:, :, ch)] = (T[(:, :, ch)] / norm_T) for i in range(n_rows): for j in range(n_cols): for ch in range(n_channels): norm_I = np.linalg.norm(I[(i:(i + n_rows_T), j:(j + n_cols_T), ch)]) I_cropped_n = (I[(i:(i + n_rows_T), j:(j + n_cols_T), ch)] / norm_I) heatmap_rgb[i][j][ch] = np.sum((T_n[(:, :, ch)] * I_cropped_n)) heatmap = (((0.8 * heatmap_rgb[(:, :, 0)]) + (0.1 * heatmap_rgb[(:, :, 1)])) + (0.1 * heatmap_rgb[(:, :, 2)])) return heatmap
def compute_convolution(I_o, T, stride=1): '\n This function takes an image <I> and a template <T> (both numpy arrays) \n and returns a heatmap where each grid represents the output produced by \n convolution at each location. You can add optional parameters (e.g. stride, \n window_size, padding) to create additional functionality. \n ' (n_rows, n_cols, n_channels) = np.shape(I_o) (n_rows_T, n_cols_T, n_channels_T) = np.shape(T) nrTh = int(((n_rows_T - 1) / 2)) ncTh = int(((n_cols_T - 1) / 2)) I = np.pad(I_o, ((nrTh, nrTh), (ncTh, ncTh), (0, 0)), 'constant') heatmap_rgb = np.zeros((n_rows, n_cols, n_channels)) T_n = np.zeros((n_rows_T, n_cols_T, n_channels_T)) for ch in range(n_channels_T): norm_T = np.linalg.norm(T[(:, :, ch)]) T_n[(:, :, ch)] = (T[(:, :, ch)] / norm_T) for i in range(n_rows): for j in range(n_cols): for ch in range(n_channels): norm_I = np.linalg.norm(I[(i:(i + n_rows_T), j:(j + n_cols_T), ch)]) I_cropped_n = (I[(i:(i + n_rows_T), j:(j + n_cols_T), ch)] / norm_I) heatmap_rgb[i][j][ch] = np.sum((T_n[(:, :, ch)] * I_cropped_n)) heatmap = (((0.8 * heatmap_rgb[(:, :, 0)]) + (0.1 * heatmap_rgb[(:, :, 1)])) + (0.1 * heatmap_rgb[(:, :, 2)])) return heatmap<|docstring|>This function takes an image <I> and a template <T> (both numpy arrays) and returns a heatmap where each grid represents the output produced by convolution at each location. You can add optional parameters (e.g. stride, window_size, padding) to create additional functionality.<|endoftext|>
28cbddeae2866753bc8ed83a0678a1972a72c80f54cf156cf07ec7708530b382
def predict_boxes(heatmap_o, n_rows_T, n_cols_T): '\n This function takes heatmap and returns the bounding boxes and associated\n confidence scores.\n ' n_rows = np.shape(heatmap_o)[0] n_cols = np.shape(heatmap_o)[1] output = [] '\n BEGIN YOUR CODE\n ' nrTh = int(((n_rows_T - 1) / 2)) ncTh = int(((n_cols_T - 1) / 2)) heatmap = np.where((heatmap_o > 0.92), heatmap_o, 0) while np.any((heatmap != 0)): score = np.amax(heatmap) idx = np.where((heatmap == score)) c_row = idx[0].item(0) c_col = idx[1].item(0) tl_row = (c_row - nrTh) tl_col = (c_col - ncTh) br_row = (tl_row + 7) br_col = (tl_col + 7) output.append([tl_row, tl_col, br_row, br_col, score]) top = np.max([(c_row - n_rows_T), 0]) bottom = np.min([(c_row + n_rows_T), n_rows]) left = np.max([(c_col - n_cols_T), 0]) right = np.min([(c_col + n_cols_T), n_cols]) heatmap[(top:bottom, left:right)] = 0 '\n END YOUR CODE\n ' return output
This function takes heatmap and returns the bounding boxes and associated confidence scores.
run_predictions.py
predict_boxes
MingshuLiang/caltech-ee148-spring2020-hw02
0
python
def predict_boxes(heatmap_o, n_rows_T, n_cols_T): '\n This function takes heatmap and returns the bounding boxes and associated\n confidence scores.\n ' n_rows = np.shape(heatmap_o)[0] n_cols = np.shape(heatmap_o)[1] output = [] '\n BEGIN YOUR CODE\n ' nrTh = int(((n_rows_T - 1) / 2)) ncTh = int(((n_cols_T - 1) / 2)) heatmap = np.where((heatmap_o > 0.92), heatmap_o, 0) while np.any((heatmap != 0)): score = np.amax(heatmap) idx = np.where((heatmap == score)) c_row = idx[0].item(0) c_col = idx[1].item(0) tl_row = (c_row - nrTh) tl_col = (c_col - ncTh) br_row = (tl_row + 7) br_col = (tl_col + 7) output.append([tl_row, tl_col, br_row, br_col, score]) top = np.max([(c_row - n_rows_T), 0]) bottom = np.min([(c_row + n_rows_T), n_rows]) left = np.max([(c_col - n_cols_T), 0]) right = np.min([(c_col + n_cols_T), n_cols]) heatmap[(top:bottom, left:right)] = 0 '\n END YOUR CODE\n ' return output
def predict_boxes(heatmap_o, n_rows_T, n_cols_T): '\n This function takes heatmap and returns the bounding boxes and associated\n confidence scores.\n ' n_rows = np.shape(heatmap_o)[0] n_cols = np.shape(heatmap_o)[1] output = [] '\n BEGIN YOUR CODE\n ' nrTh = int(((n_rows_T - 1) / 2)) ncTh = int(((n_cols_T - 1) / 2)) heatmap = np.where((heatmap_o > 0.92), heatmap_o, 0) while np.any((heatmap != 0)): score = np.amax(heatmap) idx = np.where((heatmap == score)) c_row = idx[0].item(0) c_col = idx[1].item(0) tl_row = (c_row - nrTh) tl_col = (c_col - ncTh) br_row = (tl_row + 7) br_col = (tl_col + 7) output.append([tl_row, tl_col, br_row, br_col, score]) top = np.max([(c_row - n_rows_T), 0]) bottom = np.min([(c_row + n_rows_T), n_rows]) left = np.max([(c_col - n_cols_T), 0]) right = np.min([(c_col + n_cols_T), n_cols]) heatmap[(top:bottom, left:right)] = 0 '\n END YOUR CODE\n ' return output<|docstring|>This function takes heatmap and returns the bounding boxes and associated confidence scores.<|endoftext|>
886fe39d6ba61563f6bec3e7e103855d650a23660794817d85f07389d0332fdd
def detect_red_light_mf(I): '\n This function takes a numpy array <I> and returns a list <output>.\n The length of <output> is the number of bounding boxes predicted for <I>. \n Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. \n The first four entries are four integers specifying a bounding box \n (the row and column index of the top left corner and the row and column \n index of the bottom right corner).\n <score> is a confidence score ranging from 0 to 1. \n\n Note that PIL loads images in RGB order, so:\n I[:,:,0] is the red channel\n I[:,:,1] is the green channel\n I[:,:,2] is the blue channel\n ' '\n BEGIN YOUR CODE\n ' T_path = 'data/kernel' T_names = sorted(os.listdir(T_path)) T_names = [f for f in T_names if ('.jpg' in f)] T = np.zeros((21, 9, 3)) for i in range(len(T_names)): T_c = Image.open(os.path.join(T_path, T_names[i])) T += np.asarray(T_c) T /= len(T_names) heatmap = compute_convolution(I, T) (n_rows_T, n_cols_T, n_channels_T) = np.shape(T) output = predict_boxes(heatmap, n_rows_T, n_cols_T) '\n END YOUR CODE\n ' for i in range(len(output)): assert (len(output[i]) == 5) assert ((output[i][4] >= 0.0) and (output[i][4] <= 1.0)) return output
This function takes a numpy array <I> and returns a list <output>. The length of <output> is the number of bounding boxes predicted for <I>. Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. The first four entries are four integers specifying a bounding box (the row and column index of the top left corner and the row and column index of the bottom right corner). <score> is a confidence score ranging from 0 to 1. Note that PIL loads images in RGB order, so: I[:,:,0] is the red channel I[:,:,1] is the green channel I[:,:,2] is the blue channel
run_predictions.py
detect_red_light_mf
MingshuLiang/caltech-ee148-spring2020-hw02
0
python
def detect_red_light_mf(I): '\n This function takes a numpy array <I> and returns a list <output>.\n The length of <output> is the number of bounding boxes predicted for <I>. \n Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. \n The first four entries are four integers specifying a bounding box \n (the row and column index of the top left corner and the row and column \n index of the bottom right corner).\n <score> is a confidence score ranging from 0 to 1. \n\n Note that PIL loads images in RGB order, so:\n I[:,:,0] is the red channel\n I[:,:,1] is the green channel\n I[:,:,2] is the blue channel\n ' '\n BEGIN YOUR CODE\n ' T_path = 'data/kernel' T_names = sorted(os.listdir(T_path)) T_names = [f for f in T_names if ('.jpg' in f)] T = np.zeros((21, 9, 3)) for i in range(len(T_names)): T_c = Image.open(os.path.join(T_path, T_names[i])) T += np.asarray(T_c) T /= len(T_names) heatmap = compute_convolution(I, T) (n_rows_T, n_cols_T, n_channels_T) = np.shape(T) output = predict_boxes(heatmap, n_rows_T, n_cols_T) '\n END YOUR CODE\n ' for i in range(len(output)): assert (len(output[i]) == 5) assert ((output[i][4] >= 0.0) and (output[i][4] <= 1.0)) return output
def detect_red_light_mf(I): '\n This function takes a numpy array <I> and returns a list <output>.\n The length of <output> is the number of bounding boxes predicted for <I>. \n Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. \n The first four entries are four integers specifying a bounding box \n (the row and column index of the top left corner and the row and column \n index of the bottom right corner).\n <score> is a confidence score ranging from 0 to 1. \n\n Note that PIL loads images in RGB order, so:\n I[:,:,0] is the red channel\n I[:,:,1] is the green channel\n I[:,:,2] is the blue channel\n ' '\n BEGIN YOUR CODE\n ' T_path = 'data/kernel' T_names = sorted(os.listdir(T_path)) T_names = [f for f in T_names if ('.jpg' in f)] T = np.zeros((21, 9, 3)) for i in range(len(T_names)): T_c = Image.open(os.path.join(T_path, T_names[i])) T += np.asarray(T_c) T /= len(T_names) heatmap = compute_convolution(I, T) (n_rows_T, n_cols_T, n_channels_T) = np.shape(T) output = predict_boxes(heatmap, n_rows_T, n_cols_T) '\n END YOUR CODE\n ' for i in range(len(output)): assert (len(output[i]) == 5) assert ((output[i][4] >= 0.0) and (output[i][4] <= 1.0)) return output<|docstring|>This function takes a numpy array <I> and returns a list <output>. The length of <output> is the number of bounding boxes predicted for <I>. Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. The first four entries are four integers specifying a bounding box (the row and column index of the top left corner and the row and column index of the bottom right corner). <score> is a confidence score ranging from 0 to 1. Note that PIL loads images in RGB order, so: I[:,:,0] is the red channel I[:,:,1] is the green channel I[:,:,2] is the blue channel<|endoftext|>
67c7d8a53bb7acf46c905080238afc4088fc3305d7fc11db82ff5dc0422f074b
def make_promotion_mask_up(step_size, data_format): '\n 右上、右、上、左上、左方向に移動した時の成りの領域のマスク\n 成れる場所ならばTrue、それ以外はFalse\n\n :param step_size:\n :param data_format:\n :return:\n ' name = 'black_promotion_mask_up' collection = tf.get_collection_ref(name) if len(collection): return collection[0] if (data_format == 'NCHW'): mask = np.zeros((1, 1, 9, 9), dtype=np.bool) mask[(:, :, :, :3)] = True else: mask = np.zeros((1, 9, 9, 1), dtype=np.bool) mask[(:, :, :3, :)] = True mask = tf.constant(mask, dtype=tf.bool) tf.add_to_collection(name, mask) return mask
右上、右、上、左上、左方向に移動した時の成りの領域のマスク 成れる場所ならばTrue、それ以外はFalse :param step_size: :param data_format: :return:
annotation/black_action/promotion_mask.py
make_promotion_mask_up
windfall-shogi/feature-annotation
0
python
def make_promotion_mask_up(step_size, data_format): '\n 右上、右、上、左上、左方向に移動した時の成りの領域のマスク\n 成れる場所ならばTrue、それ以外はFalse\n\n :param step_size:\n :param data_format:\n :return:\n ' name = 'black_promotion_mask_up' collection = tf.get_collection_ref(name) if len(collection): return collection[0] if (data_format == 'NCHW'): mask = np.zeros((1, 1, 9, 9), dtype=np.bool) mask[(:, :, :, :3)] = True else: mask = np.zeros((1, 9, 9, 1), dtype=np.bool) mask[(:, :, :3, :)] = True mask = tf.constant(mask, dtype=tf.bool) tf.add_to_collection(name, mask) return mask
def make_promotion_mask_up(step_size, data_format): '\n 右上、右、上、左上、左方向に移動した時の成りの領域のマスク\n 成れる場所ならばTrue、それ以外はFalse\n\n :param step_size:\n :param data_format:\n :return:\n ' name = 'black_promotion_mask_up' collection = tf.get_collection_ref(name) if len(collection): return collection[0] if (data_format == 'NCHW'): mask = np.zeros((1, 1, 9, 9), dtype=np.bool) mask[(:, :, :, :3)] = True else: mask = np.zeros((1, 9, 9, 1), dtype=np.bool) mask[(:, :, :3, :)] = True mask = tf.constant(mask, dtype=tf.bool) tf.add_to_collection(name, mask) return mask<|docstring|>右上、右、上、左上、左方向に移動した時の成りの領域のマスク 成れる場所ならばTrue、それ以外はFalse :param step_size: :param data_format: :return:<|endoftext|>
9a43e60ed0f1f55738e75243208ad2b9a5d565dc7972119b9a3a229c13887e0f
def make_promotion_mask_down(step_size, data_format): '\n 右下、下、左下方向に移動した時の成りの領域のマスク\n 成れる場所ならばTrue、それ以外はFalse\n\n :param step_size:\n :param data_format:\n :return:\n ' name = 'black_promotion_mask_down{}'.format(step_size) collection = tf.get_collection_ref(name) if len(collection): return collection[0] if (data_format == 'NCHW'): mask = np.zeros((1, 1, 9, 9), dtype=np.bool) mask[(:, :, :, step_size:(step_size + 3))] = True else: mask = np.zeros((1, 9, 9, 1), dtype=np.bool) mask[(:, :, step_size:(step_size + 3), :)] = True mask = tf.constant(mask, dtype=tf.bool) tf.add_to_collection(name, mask) return mask
右下、下、左下方向に移動した時の成りの領域のマスク 成れる場所ならばTrue、それ以外はFalse :param step_size: :param data_format: :return:
annotation/black_action/promotion_mask.py
make_promotion_mask_down
windfall-shogi/feature-annotation
0
python
def make_promotion_mask_down(step_size, data_format): '\n 右下、下、左下方向に移動した時の成りの領域のマスク\n 成れる場所ならばTrue、それ以外はFalse\n\n :param step_size:\n :param data_format:\n :return:\n ' name = 'black_promotion_mask_down{}'.format(step_size) collection = tf.get_collection_ref(name) if len(collection): return collection[0] if (data_format == 'NCHW'): mask = np.zeros((1, 1, 9, 9), dtype=np.bool) mask[(:, :, :, step_size:(step_size + 3))] = True else: mask = np.zeros((1, 9, 9, 1), dtype=np.bool) mask[(:, :, step_size:(step_size + 3), :)] = True mask = tf.constant(mask, dtype=tf.bool) tf.add_to_collection(name, mask) return mask
def make_promotion_mask_down(step_size, data_format): '\n 右下、下、左下方向に移動した時の成りの領域のマスク\n 成れる場所ならばTrue、それ以外はFalse\n\n :param step_size:\n :param data_format:\n :return:\n ' name = 'black_promotion_mask_down{}'.format(step_size) collection = tf.get_collection_ref(name) if len(collection): return collection[0] if (data_format == 'NCHW'): mask = np.zeros((1, 1, 9, 9), dtype=np.bool) mask[(:, :, :, step_size:(step_size + 3))] = True else: mask = np.zeros((1, 9, 9, 1), dtype=np.bool) mask[(:, :, step_size:(step_size + 3), :)] = True mask = tf.constant(mask, dtype=tf.bool) tf.add_to_collection(name, mask) return mask<|docstring|>右下、下、左下方向に移動した時の成りの領域のマスク 成れる場所ならばTrue、それ以外はFalse :param step_size: :param data_format: :return:<|endoftext|>
cec5f3b8ab927ef73702eed50a905fad1735ac0aca43ecf3161be039f9bc708b
def word_tokenize(text: str, separator: str='-', return_tokens: bool=True) -> Union[(List, str)]: 'Khmer language word tokenization\n\n Args:\n text(str): Raw text\n separator(str, optional): Token - separator in case return_tokens = True. Defaults to "-".\n return_tokens(bool, optional): Whether return a tokenized text or a list of tokens. Defaults to True.\n\n Returns:\n Union[List, str]: Tokens or tokenized text, separated by the separator\n ' text = cleanup_str(text) skcc = seg_kcc(text) features = create_kcc_features(skcc) pred = crf_model.predict([features]) tkcc = [] for k in features: tkcc.append(k['kcc']) complete = '' tokens = [] for (i, p) in enumerate(pred[0]): if ((p == '1') or (i == 0)): tokens.append(tkcc[i]) else: tokens[(- 1)] += tkcc[i] if return_tokens: return tokens complete = separator.join(tokens) complete = complete.replace(((separator + ' ') + separator), ' ') return complete
Khmer language word tokenization Args: text(str): Raw text separator(str, optional): Token - separator in case return_tokens = True. Defaults to "-". return_tokens(bool, optional): Whether return a tokenized text or a list of tokens. Defaults to True. Returns: Union[List, str]: Tokens or tokenized text, separated by the separator
khmernltk/word_tokenize/__init__.py
word_tokenize
VietHoang1710/khmer_nltk
18
python
def word_tokenize(text: str, separator: str='-', return_tokens: bool=True) -> Union[(List, str)]: 'Khmer language word tokenization\n\n Args:\n text(str): Raw text\n separator(str, optional): Token - separator in case return_tokens = True. Defaults to "-".\n return_tokens(bool, optional): Whether return a tokenized text or a list of tokens. Defaults to True.\n\n Returns:\n Union[List, str]: Tokens or tokenized text, separated by the separator\n ' text = cleanup_str(text) skcc = seg_kcc(text) features = create_kcc_features(skcc) pred = crf_model.predict([features]) tkcc = [] for k in features: tkcc.append(k['kcc']) complete = tokens = [] for (i, p) in enumerate(pred[0]): if ((p == '1') or (i == 0)): tokens.append(tkcc[i]) else: tokens[(- 1)] += tkcc[i] if return_tokens: return tokens complete = separator.join(tokens) complete = complete.replace(((separator + ' ') + separator), ' ') return complete
def word_tokenize(text: str, separator: str='-', return_tokens: bool=True) -> Union[(List, str)]: 'Khmer language word tokenization\n\n Args:\n text(str): Raw text\n separator(str, optional): Token - separator in case return_tokens = True. Defaults to "-".\n return_tokens(bool, optional): Whether return a tokenized text or a list of tokens. Defaults to True.\n\n Returns:\n Union[List, str]: Tokens or tokenized text, separated by the separator\n ' text = cleanup_str(text) skcc = seg_kcc(text) features = create_kcc_features(skcc) pred = crf_model.predict([features]) tkcc = [] for k in features: tkcc.append(k['kcc']) complete = tokens = [] for (i, p) in enumerate(pred[0]): if ((p == '1') or (i == 0)): tokens.append(tkcc[i]) else: tokens[(- 1)] += tkcc[i] if return_tokens: return tokens complete = separator.join(tokens) complete = complete.replace(((separator + ' ') + separator), ' ') return complete<|docstring|>Khmer language word tokenization Args: text(str): Raw text separator(str, optional): Token - separator in case return_tokens = True. Defaults to "-". return_tokens(bool, optional): Whether return a tokenized text or a list of tokens. Defaults to True. Returns: Union[List, str]: Tokens or tokenized text, separated by the separator<|endoftext|>
6106380a028375fd8d045de90d8dd242dc0441cbcdb601933068b6e2c065ca7e
def load(filename): 'Load an image from the given filename.\n\n Parameters\n ----------\n filename : string\n Should resolve to a complete filename path.\n\n Returns\n -------\n image : An `Image` object\n If successful, a new `Image` object is returned.\n\n See Also\n --------\n save_image : function for saving images\n fromarray : function for creating images from numpy arrays\n\n Examples\n --------\n\n >>> from nipy.io.api import load_image\n >>> from nipy.testing import anatfile\n >>> img = load_image(anatfile)\n >>> img.shape\n (33, 41, 25)\n ' img = formats.load(filename) aff = img.get_affine() shape = img.get_shape() hdr = img.get_header() try: fps = hdr.get_dim_info() except (TypeError, AttributeError): fps = (None, None, None) ijk = ijk_from_fps(fps) try: zooms = hdr.get_zooms() except AttributeError: zooms = np.ones(len(shape)) aff = _match_affine(aff, len(shape), zooms) coordmap = coordmap_from_affine(aff, ijk) img = Image(img.get_data(), coordmap) img.header = hdr return img
Load an image from the given filename. Parameters ---------- filename : string Should resolve to a complete filename path. Returns ------- image : An `Image` object If successful, a new `Image` object is returned. See Also -------- save_image : function for saving images fromarray : function for creating images from numpy arrays Examples -------- >>> from nipy.io.api import load_image >>> from nipy.testing import anatfile >>> img = load_image(anatfile) >>> img.shape (33, 41, 25)
nipy/io/files.py
load
yarikoptic/NiPy-OLD
1
python
def load(filename): 'Load an image from the given filename.\n\n Parameters\n ----------\n filename : string\n Should resolve to a complete filename path.\n\n Returns\n -------\n image : An `Image` object\n If successful, a new `Image` object is returned.\n\n See Also\n --------\n save_image : function for saving images\n fromarray : function for creating images from numpy arrays\n\n Examples\n --------\n\n >>> from nipy.io.api import load_image\n >>> from nipy.testing import anatfile\n >>> img = load_image(anatfile)\n >>> img.shape\n (33, 41, 25)\n ' img = formats.load(filename) aff = img.get_affine() shape = img.get_shape() hdr = img.get_header() try: fps = hdr.get_dim_info() except (TypeError, AttributeError): fps = (None, None, None) ijk = ijk_from_fps(fps) try: zooms = hdr.get_zooms() except AttributeError: zooms = np.ones(len(shape)) aff = _match_affine(aff, len(shape), zooms) coordmap = coordmap_from_affine(aff, ijk) img = Image(img.get_data(), coordmap) img.header = hdr return img
def load(filename): 'Load an image from the given filename.\n\n Parameters\n ----------\n filename : string\n Should resolve to a complete filename path.\n\n Returns\n -------\n image : An `Image` object\n If successful, a new `Image` object is returned.\n\n See Also\n --------\n save_image : function for saving images\n fromarray : function for creating images from numpy arrays\n\n Examples\n --------\n\n >>> from nipy.io.api import load_image\n >>> from nipy.testing import anatfile\n >>> img = load_image(anatfile)\n >>> img.shape\n (33, 41, 25)\n ' img = formats.load(filename) aff = img.get_affine() shape = img.get_shape() hdr = img.get_header() try: fps = hdr.get_dim_info() except (TypeError, AttributeError): fps = (None, None, None) ijk = ijk_from_fps(fps) try: zooms = hdr.get_zooms() except AttributeError: zooms = np.ones(len(shape)) aff = _match_affine(aff, len(shape), zooms) coordmap = coordmap_from_affine(aff, ijk) img = Image(img.get_data(), coordmap) img.header = hdr return img<|docstring|>Load an image from the given filename. Parameters ---------- filename : string Should resolve to a complete filename path. Returns ------- image : An `Image` object If successful, a new `Image` object is returned. See Also -------- save_image : function for saving images fromarray : function for creating images from numpy arrays Examples -------- >>> from nipy.io.api import load_image >>> from nipy.testing import anatfile >>> img = load_image(anatfile) >>> img.shape (33, 41, 25)<|endoftext|>
89f29a007e6c197096ab81c4448135cbe53d97edf5e46c44eadbab52dc00c628
def _match_affine(aff, ndim, zooms=None): ' Fill or prune affine to given number of dimensions\n\n >>> aff = np.arange(16).reshape(4,4)\n >>> _match_affine(aff, 3)\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> _match_affine(aff, 2)\n array([[ 0., 1., 3.],\n [ 4., 5., 7.],\n [ 0., 0., 1.]])\n >>> _match_affine(aff, 4)\n array([[ 0., 1., 2., 0., 3.],\n [ 4., 5., 6., 0., 7.],\n [ 8., 9., 10., 0., 11.],\n [ 0., 0., 0., 1., 0.],\n [ 0., 0., 0., 0., 1.]])\n >>> aff = np.arange(9).reshape(3,3)\n >>> _match_affine(aff, 2)\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n ' if (aff.shape[0] != aff.shape[1]): raise ValueError('Need square affine') aff_dim = (aff.shape[0] - 1) if (ndim == aff_dim): return aff aff_diag = np.ones((ndim + 1)) if (not (zooms is None)): n = min(len(zooms), ndim) aff_diag[:n] = zooms[:n] mod_aff = np.diag(aff_diag) n = min(ndim, aff_dim) mod_aff[(:n, :n)] = aff[(:n, :n)] mod_aff[(:n, (- 1))] = aff[(:n, (- 1))] return mod_aff
Fill or prune affine to given number of dimensions >>> aff = np.arange(16).reshape(4,4) >>> _match_affine(aff, 3) array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> _match_affine(aff, 2) array([[ 0., 1., 3.], [ 4., 5., 7.], [ 0., 0., 1.]]) >>> _match_affine(aff, 4) array([[ 0., 1., 2., 0., 3.], [ 4., 5., 6., 0., 7.], [ 8., 9., 10., 0., 11.], [ 0., 0., 0., 1., 0.], [ 0., 0., 0., 0., 1.]]) >>> aff = np.arange(9).reshape(3,3) >>> _match_affine(aff, 2) array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
nipy/io/files.py
_match_affine
yarikoptic/NiPy-OLD
1
python
def _match_affine(aff, ndim, zooms=None): ' Fill or prune affine to given number of dimensions\n\n >>> aff = np.arange(16).reshape(4,4)\n >>> _match_affine(aff, 3)\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> _match_affine(aff, 2)\n array([[ 0., 1., 3.],\n [ 4., 5., 7.],\n [ 0., 0., 1.]])\n >>> _match_affine(aff, 4)\n array([[ 0., 1., 2., 0., 3.],\n [ 4., 5., 6., 0., 7.],\n [ 8., 9., 10., 0., 11.],\n [ 0., 0., 0., 1., 0.],\n [ 0., 0., 0., 0., 1.]])\n >>> aff = np.arange(9).reshape(3,3)\n >>> _match_affine(aff, 2)\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n ' if (aff.shape[0] != aff.shape[1]): raise ValueError('Need square affine') aff_dim = (aff.shape[0] - 1) if (ndim == aff_dim): return aff aff_diag = np.ones((ndim + 1)) if (not (zooms is None)): n = min(len(zooms), ndim) aff_diag[:n] = zooms[:n] mod_aff = np.diag(aff_diag) n = min(ndim, aff_dim) mod_aff[(:n, :n)] = aff[(:n, :n)] mod_aff[(:n, (- 1))] = aff[(:n, (- 1))] return mod_aff
def _match_affine(aff, ndim, zooms=None): ' Fill or prune affine to given number of dimensions\n\n >>> aff = np.arange(16).reshape(4,4)\n >>> _match_affine(aff, 3)\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> _match_affine(aff, 2)\n array([[ 0., 1., 3.],\n [ 4., 5., 7.],\n [ 0., 0., 1.]])\n >>> _match_affine(aff, 4)\n array([[ 0., 1., 2., 0., 3.],\n [ 4., 5., 6., 0., 7.],\n [ 8., 9., 10., 0., 11.],\n [ 0., 0., 0., 1., 0.],\n [ 0., 0., 0., 0., 1.]])\n >>> aff = np.arange(9).reshape(3,3)\n >>> _match_affine(aff, 2)\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n ' if (aff.shape[0] != aff.shape[1]): raise ValueError('Need square affine') aff_dim = (aff.shape[0] - 1) if (ndim == aff_dim): return aff aff_diag = np.ones((ndim + 1)) if (not (zooms is None)): n = min(len(zooms), ndim) aff_diag[:n] = zooms[:n] mod_aff = np.diag(aff_diag) n = min(ndim, aff_dim) mod_aff[(:n, :n)] = aff[(:n, :n)] mod_aff[(:n, (- 1))] = aff[(:n, (- 1))] return mod_aff<|docstring|>Fill or prune affine to given number of dimensions >>> aff = np.arange(16).reshape(4,4) >>> _match_affine(aff, 3) array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> _match_affine(aff, 2) array([[ 0., 1., 3.], [ 4., 5., 7.], [ 0., 0., 1.]]) >>> _match_affine(aff, 4) array([[ 0., 1., 2., 0., 3.], [ 4., 5., 6., 0., 7.], [ 8., 9., 10., 0., 11.], [ 0., 0., 0., 1., 0.], [ 0., 0., 0., 0., 1.]]) >>> aff = np.arange(9).reshape(3,3) >>> _match_affine(aff, 2) array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])<|endoftext|>
8f29f7f063a677667e97a2020fe507b364cab514afdb32012cd500a793707ef0
def save(img, filename, dtype=None): 'Write the image to a file.\n\n Parameters\n ----------\n img : An `Image` object\n filename : string\n Should be a valid filename.\n\n Returns\n -------\n image : An `Image` object\n\n See Also\n --------\n load_image : function for loading images\n fromarray : function for creating images from numpy arrays\n\n Examples\n --------\n\n >>> import os\n >>> import numpy as np\n >>> from tempfile import mkstemp\n >>> from nipy.core.api import fromarray\n >>> from nipy.io.api import save_image\n >>> data = np.zeros((91,109,91), dtype=np.uint8)\n >>> img = fromarray(data, \'kji\', \'zxy\')\n >>> fd, fname = mkstemp(suffix=\'.nii.gz\')\n >>> saved_img = save_image(img, fname)\n >>> saved_img.shape\n (91, 109, 91)\n >>> os.unlink(fname)\n >>> fd, fname = mkstemp(suffix=\'.img.gz\')\n >>> saved_img = save_image(img, fname)\n >>> saved_img.shape\n (91, 109, 91)\n >>> os.unlink(fname)\n >>> fname = \'test.mnc\'\n >>> saved_image = save_image(img, fname)\n Traceback (most recent call last):\n ...\n ValueError: Cannot save file type "minc"\n \n Notes\n -----\n Filetype is determined by the file extension in \'filename\'. 
Currently the\n following filetypes are supported:\n \n * Nifti single file : [\'.nii\', \'.nii.gz\']\n * Nifti file pair : [\'.hdr\', \'.hdr.gz\']\n * Analyze file pair : [\'.img\', \'img.gz\']\n ' try: original_hdr = img.header except AttributeError: original_hdr = None (newcmap, order) = coerce_coordmap(img.coordmap) Fimg = Image(np.transpose(np.asarray(img), order), newcmap) rzs = Fimg.affine[(:(- 1), :(- 1))] zooms = np.sqrt(np.sum((rzs * rzs), axis=0)) aff = _match_affine(Fimg.affine, 3, zooms) ftype = _type_from_filename(filename) if ftype.startswith('nifti1'): klass = formats.Nifti1Image elif (ftype == 'analyze'): klass = formats.Spm2AnalyzeImage else: raise ValueError(('Cannot save file type "%s"' % ftype)) out_img = klass(data=np.asarray(Fimg), affine=aff, header=original_hdr) hdr = out_img.get_header() ijk = newcmap.input_coords.coord_names fps = fps_from_ijk(ijk) try: hdr.set_dim_info(*fps) except AttributeError: pass hdr.set_zooms(zooms) out_img.to_filename(filename) return Fimg
Write the image to a file. Parameters ---------- img : An `Image` object filename : string Should be a valid filename. Returns ------- image : An `Image` object See Also -------- load_image : function for loading images fromarray : function for creating images from numpy arrays Examples -------- >>> import os >>> import numpy as np >>> from tempfile import mkstemp >>> from nipy.core.api import fromarray >>> from nipy.io.api import save_image >>> data = np.zeros((91,109,91), dtype=np.uint8) >>> img = fromarray(data, 'kji', 'zxy') >>> fd, fname = mkstemp(suffix='.nii.gz') >>> saved_img = save_image(img, fname) >>> saved_img.shape (91, 109, 91) >>> os.unlink(fname) >>> fd, fname = mkstemp(suffix='.img.gz') >>> saved_img = save_image(img, fname) >>> saved_img.shape (91, 109, 91) >>> os.unlink(fname) >>> fname = 'test.mnc' >>> saved_image = save_image(img, fname) Traceback (most recent call last): ... ValueError: Cannot save file type "minc" Notes ----- Filetype is determined by the file extension in 'filename'. Currently the following filetypes are supported: * Nifti single file : ['.nii', '.nii.gz'] * Nifti file pair : ['.hdr', '.hdr.gz'] * Analyze file pair : ['.img', 'img.gz']
nipy/io/files.py
save
yarikoptic/NiPy-OLD
1
python
def save(img, filename, dtype=None): 'Write the image to a file.\n\n Parameters\n ----------\n img : An `Image` object\n filename : string\n Should be a valid filename.\n\n Returns\n -------\n image : An `Image` object\n\n See Also\n --------\n load_image : function for loading images\n fromarray : function for creating images from numpy arrays\n\n Examples\n --------\n\n >>> import os\n >>> import numpy as np\n >>> from tempfile import mkstemp\n >>> from nipy.core.api import fromarray\n >>> from nipy.io.api import save_image\n >>> data = np.zeros((91,109,91), dtype=np.uint8)\n >>> img = fromarray(data, \'kji\', \'zxy\')\n >>> fd, fname = mkstemp(suffix=\'.nii.gz\')\n >>> saved_img = save_image(img, fname)\n >>> saved_img.shape\n (91, 109, 91)\n >>> os.unlink(fname)\n >>> fd, fname = mkstemp(suffix=\'.img.gz\')\n >>> saved_img = save_image(img, fname)\n >>> saved_img.shape\n (91, 109, 91)\n >>> os.unlink(fname)\n >>> fname = \'test.mnc\'\n >>> saved_image = save_image(img, fname)\n Traceback (most recent call last):\n ...\n ValueError: Cannot save file type "minc"\n \n Notes\n -----\n Filetype is determined by the file extension in \'filename\'. 
Currently the\n following filetypes are supported:\n \n * Nifti single file : [\'.nii\', \'.nii.gz\']\n * Nifti file pair : [\'.hdr\', \'.hdr.gz\']\n * Analyze file pair : [\'.img\', \'img.gz\']\n ' try: original_hdr = img.header except AttributeError: original_hdr = None (newcmap, order) = coerce_coordmap(img.coordmap) Fimg = Image(np.transpose(np.asarray(img), order), newcmap) rzs = Fimg.affine[(:(- 1), :(- 1))] zooms = np.sqrt(np.sum((rzs * rzs), axis=0)) aff = _match_affine(Fimg.affine, 3, zooms) ftype = _type_from_filename(filename) if ftype.startswith('nifti1'): klass = formats.Nifti1Image elif (ftype == 'analyze'): klass = formats.Spm2AnalyzeImage else: raise ValueError(('Cannot save file type "%s"' % ftype)) out_img = klass(data=np.asarray(Fimg), affine=aff, header=original_hdr) hdr = out_img.get_header() ijk = newcmap.input_coords.coord_names fps = fps_from_ijk(ijk) try: hdr.set_dim_info(*fps) except AttributeError: pass hdr.set_zooms(zooms) out_img.to_filename(filename) return Fimg
def save(img, filename, dtype=None): 'Write the image to a file.\n\n Parameters\n ----------\n img : An `Image` object\n filename : string\n Should be a valid filename.\n\n Returns\n -------\n image : An `Image` object\n\n See Also\n --------\n load_image : function for loading images\n fromarray : function for creating images from numpy arrays\n\n Examples\n --------\n\n >>> import os\n >>> import numpy as np\n >>> from tempfile import mkstemp\n >>> from nipy.core.api import fromarray\n >>> from nipy.io.api import save_image\n >>> data = np.zeros((91,109,91), dtype=np.uint8)\n >>> img = fromarray(data, \'kji\', \'zxy\')\n >>> fd, fname = mkstemp(suffix=\'.nii.gz\')\n >>> saved_img = save_image(img, fname)\n >>> saved_img.shape\n (91, 109, 91)\n >>> os.unlink(fname)\n >>> fd, fname = mkstemp(suffix=\'.img.gz\')\n >>> saved_img = save_image(img, fname)\n >>> saved_img.shape\n (91, 109, 91)\n >>> os.unlink(fname)\n >>> fname = \'test.mnc\'\n >>> saved_image = save_image(img, fname)\n Traceback (most recent call last):\n ...\n ValueError: Cannot save file type "minc"\n \n Notes\n -----\n Filetype is determined by the file extension in \'filename\'. 
Currently the\n following filetypes are supported:\n \n * Nifti single file : [\'.nii\', \'.nii.gz\']\n * Nifti file pair : [\'.hdr\', \'.hdr.gz\']\n * Analyze file pair : [\'.img\', \'img.gz\']\n ' try: original_hdr = img.header except AttributeError: original_hdr = None (newcmap, order) = coerce_coordmap(img.coordmap) Fimg = Image(np.transpose(np.asarray(img), order), newcmap) rzs = Fimg.affine[(:(- 1), :(- 1))] zooms = np.sqrt(np.sum((rzs * rzs), axis=0)) aff = _match_affine(Fimg.affine, 3, zooms) ftype = _type_from_filename(filename) if ftype.startswith('nifti1'): klass = formats.Nifti1Image elif (ftype == 'analyze'): klass = formats.Spm2AnalyzeImage else: raise ValueError(('Cannot save file type "%s"' % ftype)) out_img = klass(data=np.asarray(Fimg), affine=aff, header=original_hdr) hdr = out_img.get_header() ijk = newcmap.input_coords.coord_names fps = fps_from_ijk(ijk) try: hdr.set_dim_info(*fps) except AttributeError: pass hdr.set_zooms(zooms) out_img.to_filename(filename) return Fimg<|docstring|>Write the image to a file. Parameters ---------- img : An `Image` object filename : string Should be a valid filename. Returns ------- image : An `Image` object See Also -------- load_image : function for loading images fromarray : function for creating images from numpy arrays Examples -------- >>> import os >>> import numpy as np >>> from tempfile import mkstemp >>> from nipy.core.api import fromarray >>> from nipy.io.api import save_image >>> data = np.zeros((91,109,91), dtype=np.uint8) >>> img = fromarray(data, 'kji', 'zxy') >>> fd, fname = mkstemp(suffix='.nii.gz') >>> saved_img = save_image(img, fname) >>> saved_img.shape (91, 109, 91) >>> os.unlink(fname) >>> fd, fname = mkstemp(suffix='.img.gz') >>> saved_img = save_image(img, fname) >>> saved_img.shape (91, 109, 91) >>> os.unlink(fname) >>> fname = 'test.mnc' >>> saved_image = save_image(img, fname) Traceback (most recent call last): ... 
ValueError: Cannot save file type "minc" Notes ----- Filetype is determined by the file extension in 'filename'. Currently the following filetypes are supported: * Nifti single file : ['.nii', '.nii.gz'] * Nifti file pair : ['.hdr', '.hdr.gz'] * Analyze file pair : ['.img', 'img.gz']<|endoftext|>
efb835e8e37a6bfece8178c404e1ad6d42fb566b0f1caea93198b327615404df
def _type_from_filename(filename): " Return image type determined from filename\n \n Filetype is determined by the file extension in 'filename'.\n Currently the following filetypes are supported:\n \n * Nifti single file : ['.nii', '.nii.gz']\n * Nifti file pair : ['.hdr', '.hdr.gz']\n * Analyze file pair : ['.img', '.img.gz']\n\n >>> _type_from_filename('test.nii')\n 'nifti1single'\n >>> _type_from_filename('test')\n 'nifti1single'\n >>> _type_from_filename('test.hdr')\n 'nifti1pair'\n >>> _type_from_filename('test.hdr.gz')\n 'nifti1pair'\n >>> _type_from_filename('test.img.gz')\n 'analyze'\n >>> _type_from_filename('test.mnc')\n 'minc'\n " if filename.endswith('.gz'): filename = filename[:(- 3)] elif filename.endswith('.bz2'): filename = filename[:(- 4)] (_, ext) = os.path.splitext(filename) if (ext in ('', '.nii')): return 'nifti1single' if (ext == '.hdr'): return 'nifti1pair' if (ext == '.img'): return 'analyze' if (ext == '.mnc'): return 'minc' raise ValueError(('Strange file extension "%s"' % ext))
Return image type determined from filename Filetype is determined by the file extension in 'filename'. Currently the following filetypes are supported: * Nifti single file : ['.nii', '.nii.gz'] * Nifti file pair : ['.hdr', '.hdr.gz'] * Analyze file pair : ['.img', '.img.gz'] >>> _type_from_filename('test.nii') 'nifti1single' >>> _type_from_filename('test') 'nifti1single' >>> _type_from_filename('test.hdr') 'nifti1pair' >>> _type_from_filename('test.hdr.gz') 'nifti1pair' >>> _type_from_filename('test.img.gz') 'analyze' >>> _type_from_filename('test.mnc') 'minc'
nipy/io/files.py
_type_from_filename
yarikoptic/NiPy-OLD
1
python
def _type_from_filename(filename): " Return image type determined from filename\n \n Filetype is determined by the file extension in 'filename'.\n Currently the following filetypes are supported:\n \n * Nifti single file : ['.nii', '.nii.gz']\n * Nifti file pair : ['.hdr', '.hdr.gz']\n * Analyze file pair : ['.img', '.img.gz']\n\n >>> _type_from_filename('test.nii')\n 'nifti1single'\n >>> _type_from_filename('test')\n 'nifti1single'\n >>> _type_from_filename('test.hdr')\n 'nifti1pair'\n >>> _type_from_filename('test.hdr.gz')\n 'nifti1pair'\n >>> _type_from_filename('test.img.gz')\n 'analyze'\n >>> _type_from_filename('test.mnc')\n 'minc'\n " if filename.endswith('.gz'): filename = filename[:(- 3)] elif filename.endswith('.bz2'): filename = filename[:(- 4)] (_, ext) = os.path.splitext(filename) if (ext in (, '.nii')): return 'nifti1single' if (ext == '.hdr'): return 'nifti1pair' if (ext == '.img'): return 'analyze' if (ext == '.mnc'): return 'minc' raise ValueError(('Strange file extension "%s"' % ext))
def _type_from_filename(filename): " Return image type determined from filename\n \n Filetype is determined by the file extension in 'filename'.\n Currently the following filetypes are supported:\n \n * Nifti single file : ['.nii', '.nii.gz']\n * Nifti file pair : ['.hdr', '.hdr.gz']\n * Analyze file pair : ['.img', '.img.gz']\n\n >>> _type_from_filename('test.nii')\n 'nifti1single'\n >>> _type_from_filename('test')\n 'nifti1single'\n >>> _type_from_filename('test.hdr')\n 'nifti1pair'\n >>> _type_from_filename('test.hdr.gz')\n 'nifti1pair'\n >>> _type_from_filename('test.img.gz')\n 'analyze'\n >>> _type_from_filename('test.mnc')\n 'minc'\n " if filename.endswith('.gz'): filename = filename[:(- 3)] elif filename.endswith('.bz2'): filename = filename[:(- 4)] (_, ext) = os.path.splitext(filename) if (ext in (, '.nii')): return 'nifti1single' if (ext == '.hdr'): return 'nifti1pair' if (ext == '.img'): return 'analyze' if (ext == '.mnc'): return 'minc' raise ValueError(('Strange file extension "%s"' % ext))<|docstring|>Return image type determined from filename Filetype is determined by the file extension in 'filename'. Currently the following filetypes are supported: * Nifti single file : ['.nii', '.nii.gz'] * Nifti file pair : ['.hdr', '.hdr.gz'] * Analyze file pair : ['.img', '.img.gz'] >>> _type_from_filename('test.nii') 'nifti1single' >>> _type_from_filename('test') 'nifti1single' >>> _type_from_filename('test.hdr') 'nifti1pair' >>> _type_from_filename('test.hdr.gz') 'nifti1pair' >>> _type_from_filename('test.img.gz') 'analyze' >>> _type_from_filename('test.mnc') 'minc'<|endoftext|>
dcca314eaa3cc42efa393a27a5ef3cb7e3d858b0dfed24eb208a76e0d54c444f
def as_image(image_input): ' Load image from filename or pass through image instance\n\n Parameters\n ----------\n image_input : str or Image instance\n image or string filename of image. If a string, load image and\n return. If an image, pass through without modification\n\n Returns\n -------\n img : Image or Image-like instance\n Input object if `image_input` seemed to be an image, loaded Image\n object if `image_input` was a string.\n\n Raises\n ------\n TypeError : if neither string nor image-like passed\n\n Examples\n --------\n >>> from nipy.testing import anatfile\n >>> from nipy.io.api import load_image\n >>> img = as_image(anatfile)\n >>> img2 = as_image(img)\n >>> img2 is img\n True\n ' if is_image(image_input): return image_input if isinstance(image_input, basestring): return load(image_input) raise TypeError('Expecting an image-like object or filename string')
Load image from filename or pass through image instance Parameters ---------- image_input : str or Image instance image or string filename of image. If a string, load image and return. If an image, pass through without modification Returns ------- img : Image or Image-like instance Input object if `image_input` seemed to be an image, loaded Image object if `image_input` was a string. Raises ------ TypeError : if neither string nor image-like passed Examples -------- >>> from nipy.testing import anatfile >>> from nipy.io.api import load_image >>> img = as_image(anatfile) >>> img2 = as_image(img) >>> img2 is img True
nipy/io/files.py
as_image
yarikoptic/NiPy-OLD
1
python
def as_image(image_input): ' Load image from filename or pass through image instance\n\n Parameters\n ----------\n image_input : str or Image instance\n image or string filename of image. If a string, load image and\n return. If an image, pass through without modification\n\n Returns\n -------\n img : Image or Image-like instance\n Input object if `image_input` seemed to be an image, loaded Image\n object if `image_input` was a string.\n\n Raises\n ------\n TypeError : if neither string nor image-like passed\n\n Examples\n --------\n >>> from nipy.testing import anatfile\n >>> from nipy.io.api import load_image\n >>> img = as_image(anatfile)\n >>> img2 = as_image(img)\n >>> img2 is img\n True\n ' if is_image(image_input): return image_input if isinstance(image_input, basestring): return load(image_input) raise TypeError('Expecting an image-like object or filename string')
def as_image(image_input): ' Load image from filename or pass through image instance\n\n Parameters\n ----------\n image_input : str or Image instance\n image or string filename of image. If a string, load image and\n return. If an image, pass through without modification\n\n Returns\n -------\n img : Image or Image-like instance\n Input object if `image_input` seemed to be an image, loaded Image\n object if `image_input` was a string.\n\n Raises\n ------\n TypeError : if neither string nor image-like passed\n\n Examples\n --------\n >>> from nipy.testing import anatfile\n >>> from nipy.io.api import load_image\n >>> img = as_image(anatfile)\n >>> img2 = as_image(img)\n >>> img2 is img\n True\n ' if is_image(image_input): return image_input if isinstance(image_input, basestring): return load(image_input) raise TypeError('Expecting an image-like object or filename string')<|docstring|>Load image from filename or pass through image instance Parameters ---------- image_input : str or Image instance image or string filename of image. If a string, load image and return. If an image, pass through without modification Returns ------- img : Image or Image-like instance Input object if `image_input` seemed to be an image, loaded Image object if `image_input` was a string. Raises ------ TypeError : if neither string nor image-like passed Examples -------- >>> from nipy.testing import anatfile >>> from nipy.io.api import load_image >>> img = as_image(anatfile) >>> img2 = as_image(img) >>> img2 is img True<|endoftext|>
c573cd038586b5ec59e2422eb77715f22e309bbf3f740e3f03447f2bd7025185
def parse_read_options(form, prefix=''): "Extract read options from form data.\n\n Arguments:\n form (obj): Form object\n\n Keyword Arguments:\n prefix (str): prefix for the form fields (default: {''})\n\n Returns:\n (dict): Read options key - value dictionary.\n " read_options = {'encoding': getattr(form, (prefix + 'encoding')).data, 'delimiter': getattr(form, (prefix + 'delimiter')).data} geom = getattr(form, (prefix + 'geom')) lat = getattr(form, (prefix + 'lat')) lon = getattr(form, (prefix + 'lon')) if (geom.data != ''): read_options['geom'] = geom.data elif ((lat.data != '') and (lon.data != '')): read_options['lat'] = lat.data read_options['lon'] = lon.data return read_options
Extract read options from form data. Arguments: form (obj): Form object Keyword Arguments: prefix (str): prefix for the form fields (default: {''}) Returns: (dict): Read options key - value dictionary.
geometry_service/api/helpers.py
parse_read_options
OpertusMundi/geometry-service
0
python
def parse_read_options(form, prefix=): "Extract read options from form data.\n\n Arguments:\n form (obj): Form object\n\n Keyword Arguments:\n prefix (str): prefix for the form fields (default: {})\n\n Returns:\n (dict): Read options key - value dictionary.\n " read_options = {'encoding': getattr(form, (prefix + 'encoding')).data, 'delimiter': getattr(form, (prefix + 'delimiter')).data} geom = getattr(form, (prefix + 'geom')) lat = getattr(form, (prefix + 'lat')) lon = getattr(form, (prefix + 'lon')) if (geom.data != ): read_options['geom'] = geom.data elif ((lat.data != ) and (lon.data != )): read_options['lat'] = lat.data read_options['lon'] = lon.data return read_options
def parse_read_options(form, prefix=): "Extract read options from form data.\n\n Arguments:\n form (obj): Form object\n\n Keyword Arguments:\n prefix (str): prefix for the form fields (default: {})\n\n Returns:\n (dict): Read options key - value dictionary.\n " read_options = {'encoding': getattr(form, (prefix + 'encoding')).data, 'delimiter': getattr(form, (prefix + 'delimiter')).data} geom = getattr(form, (prefix + 'geom')) lat = getattr(form, (prefix + 'lat')) lon = getattr(form, (prefix + 'lon')) if (geom.data != ): read_options['geom'] = geom.data elif ((lat.data != ) and (lon.data != )): read_options['lat'] = lat.data read_options['lon'] = lon.data return read_options<|docstring|>Extract read options from form data. Arguments: form (obj): Form object Keyword Arguments: prefix (str): prefix for the form fields (default: {''}) Returns: (dict): Read options key - value dictionary.<|endoftext|>
609611b9b301b8cfe6a349baba825ea19a5b584e946cd4b59a5365ca5ba73e53
def send_file(file): 'Create a send file response.\n\n Arguments:\n file (str): Path of the file.\n\n Returns:\n (obj): Flask response\n ' from flask import send_file as flask_send_file file_content = open(file, 'rb') filename = os.path.basename(file) response = flask_send_file(file_content, attachment_filename=filename, as_attachment=True) response.headers['Content-Length'] = str(os.path.getsize(file)) return response
Create a send file response. Arguments: file (str): Path of the file. Returns: (obj): Flask response
geometry_service/api/helpers.py
send_file
OpertusMundi/geometry-service
0
python
def send_file(file): 'Create a send file response.\n\n Arguments:\n file (str): Path of the file.\n\n Returns:\n (obj): Flask response\n ' from flask import send_file as flask_send_file file_content = open(file, 'rb') filename = os.path.basename(file) response = flask_send_file(file_content, attachment_filename=filename, as_attachment=True) response.headers['Content-Length'] = str(os.path.getsize(file)) return response
def send_file(file): 'Create a send file response.\n\n Arguments:\n file (str): Path of the file.\n\n Returns:\n (obj): Flask response\n ' from flask import send_file as flask_send_file file_content = open(file, 'rb') filename = os.path.basename(file) response = flask_send_file(file_content, attachment_filename=filename, as_attachment=True) response.headers['Content-Length'] = str(os.path.getsize(file)) return response<|docstring|>Create a send file response. Arguments: file (str): Path of the file. Returns: (obj): Flask response<|endoftext|>
eedd498d1b3b90ab04232b18a94b4ab9441b9e08e2d79cc186aae2718fe9aeae
def copy_to_output(file, ticket): 'Copy file to output dir, after creating the containing path.\n\n Arguments:\n file (str): Path of the file.\n ticket (str): Request ticket.\n\n Returns:\n (str): Relative to output dir path of the copied file.\n ' from datetime import datetime from shutil import copyfile filename = os.path.basename(file) output_path = os.path.join(datetime.now().strftime('%y%m'), ticket) output_file = os.path.join(output_path, filename) full_output = os.path.join(os.environ['OUTPUT_DIR'], output_path) os.makedirs(full_output, exist_ok=True) copyfile(file, os.path.join(full_output, filename)) return output_file
Copy file to output dir, after creating the containing path. Arguments: file (str): Path of the file. ticket (str): Request ticket. Returns: (str): Relative to output dir path of the copied file.
geometry_service/api/helpers.py
copy_to_output
OpertusMundi/geometry-service
0
python
def copy_to_output(file, ticket): 'Copy file to output dir, after creating the containing path.\n\n Arguments:\n file (str): Path of the file.\n ticket (str): Request ticket.\n\n Returns:\n (str): Relative to output dir path of the copied file.\n ' from datetime import datetime from shutil import copyfile filename = os.path.basename(file) output_path = os.path.join(datetime.now().strftime('%y%m'), ticket) output_file = os.path.join(output_path, filename) full_output = os.path.join(os.environ['OUTPUT_DIR'], output_path) os.makedirs(full_output, exist_ok=True) copyfile(file, os.path.join(full_output, filename)) return output_file
def copy_to_output(file, ticket): 'Copy file to output dir, after creating the containing path.\n\n Arguments:\n file (str): Path of the file.\n ticket (str): Request ticket.\n\n Returns:\n (str): Relative to output dir path of the copied file.\n ' from datetime import datetime from shutil import copyfile filename = os.path.basename(file) output_path = os.path.join(datetime.now().strftime('%y%m'), ticket) output_file = os.path.join(output_path, filename) full_output = os.path.join(os.environ['OUTPUT_DIR'], output_path) os.makedirs(full_output, exist_ok=True) copyfile(file, os.path.join(full_output, filename)) return output_file<|docstring|>Copy file to output dir, after creating the containing path. Arguments: file (str): Path of the file. ticket (str): Request ticket. Returns: (str): Relative to output dir path of the copied file.<|endoftext|>
f5d9dff121006a6558dd07cf04793450aab2fe4509543b2797b77c02c750e37f
def burnin_action_func(): 'Select random actions until model is updated one or more times.' return np.random.uniform(action_space.low, action_space.high).astype(np.float32)
Select random actions until model is updated one or more times.
examples/mujoco/reproduction/td3/train_td3.py
burnin_action_func
tarokiritani/pfrl
824
python
def burnin_action_func(): return np.random.uniform(action_space.low, action_space.high).astype(np.float32)
def burnin_action_func(): return np.random.uniform(action_space.low, action_space.high).astype(np.float32)<|docstring|>Select random actions until model is updated one or more times.<|endoftext|>
5bb2556645933927abf01d34def954c0e70a74a72bc97c975fefc8695d8c785a
@with_feature_flags(ALERT_REPORTS=False) @pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule_disabled(self): '\n ReportSchedule Api: Test get report schedule 404s when feature is disabled\n ' report_schedule = db.session.query(ReportSchedule).filter((ReportSchedule.name == 'name1')).first() self.login(username='admin') uri = f'api/v1/report/{report_schedule.id}' rv = self.client.get(uri) assert (rv.status_code == 404)
ReportSchedule Api: Test get report schedule 404s when feature is disabled
tests/integration_tests/reports/api_tests.py
test_get_report_schedule_disabled
CodeingBoy/superset
2
python
@with_feature_flags(ALERT_REPORTS=False) @pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule_disabled(self): '\n \n ' report_schedule = db.session.query(ReportSchedule).filter((ReportSchedule.name == 'name1')).first() self.login(username='admin') uri = f'api/v1/report/{report_schedule.id}' rv = self.client.get(uri) assert (rv.status_code == 404)
@with_feature_flags(ALERT_REPORTS=False) @pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule_disabled(self): '\n \n ' report_schedule = db.session.query(ReportSchedule).filter((ReportSchedule.name == 'name1')).first() self.login(username='admin') uri = f'api/v1/report/{report_schedule.id}' rv = self.client.get(uri) assert (rv.status_code == 404)<|docstring|>ReportSchedule Api: Test get report schedule 404s when feature is disabled<|endoftext|>
682468918c6f242615652b91c8b03f17a77b829f61aac356c395b81262fdd350
@pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule(self): '\n ReportSchedule Api: Test get report schedule\n ' report_schedule = db.session.query(ReportSchedule).filter((ReportSchedule.name == 'name1')).first() self.login(username='admin') uri = f'api/v1/report/{report_schedule.id}' rv = self.get_assert_metric(uri, 'get') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 200) expected_result = {'active': report_schedule.active, 'chart': {'id': report_schedule.chart.id, 'slice_name': report_schedule.chart.slice_name, 'viz_type': report_schedule.chart.viz_type}, 'context_markdown': report_schedule.context_markdown, 'crontab': report_schedule.crontab, 'dashboard': None, 'database': {'id': report_schedule.database.id, 'database_name': report_schedule.database.database_name}, 'description': report_schedule.description, 'grace_period': report_schedule.grace_period, 'id': report_schedule.id, 'last_eval_dttm': report_schedule.last_eval_dttm, 'last_state': report_schedule.last_state, 'last_value': report_schedule.last_value, 'last_value_row_json': report_schedule.last_value_row_json, 'log_retention': report_schedule.log_retention, 'name': report_schedule.name, 'recipients': [{'id': report_schedule.recipients[0].id, 'recipient_config_json': '{"target": "example@example.com"}', 'type': 'Email'}], 'timezone': report_schedule.timezone, 'type': report_schedule.type, 'validator_config_json': report_schedule.validator_config_json, 'validator_type': report_schedule.validator_type} for key in expected_result: assert (data['result'][key] == expected_result[key]) assert ({'first_name': 'admin', 'id': 1, 'last_name': 'user'} in data['result']['owners']) assert ({'first_name': 'alpha', 'id': 5, 'last_name': 'user'} in data['result']['owners']) assert (len(data['result']['owners']) == 2)
ReportSchedule Api: Test get report schedule
tests/integration_tests/reports/api_tests.py
test_get_report_schedule
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule(self): '\n \n ' report_schedule = db.session.query(ReportSchedule).filter((ReportSchedule.name == 'name1')).first() self.login(username='admin') uri = f'api/v1/report/{report_schedule.id}' rv = self.get_assert_metric(uri, 'get') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 200) expected_result = {'active': report_schedule.active, 'chart': {'id': report_schedule.chart.id, 'slice_name': report_schedule.chart.slice_name, 'viz_type': report_schedule.chart.viz_type}, 'context_markdown': report_schedule.context_markdown, 'crontab': report_schedule.crontab, 'dashboard': None, 'database': {'id': report_schedule.database.id, 'database_name': report_schedule.database.database_name}, 'description': report_schedule.description, 'grace_period': report_schedule.grace_period, 'id': report_schedule.id, 'last_eval_dttm': report_schedule.last_eval_dttm, 'last_state': report_schedule.last_state, 'last_value': report_schedule.last_value, 'last_value_row_json': report_schedule.last_value_row_json, 'log_retention': report_schedule.log_retention, 'name': report_schedule.name, 'recipients': [{'id': report_schedule.recipients[0].id, 'recipient_config_json': '{"target": "example@example.com"}', 'type': 'Email'}], 'timezone': report_schedule.timezone, 'type': report_schedule.type, 'validator_config_json': report_schedule.validator_config_json, 'validator_type': report_schedule.validator_type} for key in expected_result: assert (data['result'][key] == expected_result[key]) assert ({'first_name': 'admin', 'id': 1, 'last_name': 'user'} in data['result']['owners']) assert ({'first_name': 'alpha', 'id': 5, 'last_name': 'user'} in data['result']['owners']) assert (len(data['result']['owners']) == 2)
@pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule(self): '\n \n ' report_schedule = db.session.query(ReportSchedule).filter((ReportSchedule.name == 'name1')).first() self.login(username='admin') uri = f'api/v1/report/{report_schedule.id}' rv = self.get_assert_metric(uri, 'get') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 200) expected_result = {'active': report_schedule.active, 'chart': {'id': report_schedule.chart.id, 'slice_name': report_schedule.chart.slice_name, 'viz_type': report_schedule.chart.viz_type}, 'context_markdown': report_schedule.context_markdown, 'crontab': report_schedule.crontab, 'dashboard': None, 'database': {'id': report_schedule.database.id, 'database_name': report_schedule.database.database_name}, 'description': report_schedule.description, 'grace_period': report_schedule.grace_period, 'id': report_schedule.id, 'last_eval_dttm': report_schedule.last_eval_dttm, 'last_state': report_schedule.last_state, 'last_value': report_schedule.last_value, 'last_value_row_json': report_schedule.last_value_row_json, 'log_retention': report_schedule.log_retention, 'name': report_schedule.name, 'recipients': [{'id': report_schedule.recipients[0].id, 'recipient_config_json': '{"target": "example@example.com"}', 'type': 'Email'}], 'timezone': report_schedule.timezone, 'type': report_schedule.type, 'validator_config_json': report_schedule.validator_config_json, 'validator_type': report_schedule.validator_type} for key in expected_result: assert (data['result'][key] == expected_result[key]) assert ({'first_name': 'admin', 'id': 1, 'last_name': 'user'} in data['result']['owners']) assert ({'first_name': 'alpha', 'id': 5, 'last_name': 'user'} in data['result']['owners']) assert (len(data['result']['owners']) == 2)<|docstring|>ReportSchedule Api: Test get report schedule<|endoftext|>
c19c08ba7e8d6d4da0784249af909de3f89060b69169b91958818207270232a4
def test_info_report_schedule(self): '\n ReportSchedule API: Test info\n ' self.login(username='admin') uri = f'api/v1/report/_info' rv = self.get_assert_metric(uri, 'info') assert (rv.status_code == 200)
ReportSchedule API: Test info
tests/integration_tests/reports/api_tests.py
test_info_report_schedule
CodeingBoy/superset
2
python
def test_info_report_schedule(self): '\n \n ' self.login(username='admin') uri = f'api/v1/report/_info' rv = self.get_assert_metric(uri, 'info') assert (rv.status_code == 200)
def test_info_report_schedule(self): '\n \n ' self.login(username='admin') uri = f'api/v1/report/_info' rv = self.get_assert_metric(uri, 'info') assert (rv.status_code == 200)<|docstring|>ReportSchedule API: Test info<|endoftext|>
a3807f4d5f448ea1cd13eddace9e133c4bc92d0cd0c270e0672fec46f5e24100
def test_info_security_report(self): '\n ReportSchedule API: Test info security\n ' self.login(username='admin') params = {'keys': ['permissions']} uri = f'api/v1/report/_info?q={prison.dumps(params)}' rv = self.get_assert_metric(uri, 'info') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 200) assert ('can_read' in data['permissions']) assert ('can_write' in data['permissions']) assert (len(data['permissions']) == 2)
ReportSchedule API: Test info security
tests/integration_tests/reports/api_tests.py
test_info_security_report
CodeingBoy/superset
2
python
def test_info_security_report(self): '\n \n ' self.login(username='admin') params = {'keys': ['permissions']} uri = f'api/v1/report/_info?q={prison.dumps(params)}' rv = self.get_assert_metric(uri, 'info') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 200) assert ('can_read' in data['permissions']) assert ('can_write' in data['permissions']) assert (len(data['permissions']) == 2)
def test_info_security_report(self): '\n \n ' self.login(username='admin') params = {'keys': ['permissions']} uri = f'api/v1/report/_info?q={prison.dumps(params)}' rv = self.get_assert_metric(uri, 'info') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 200) assert ('can_read' in data['permissions']) assert ('can_write' in data['permissions']) assert (len(data['permissions']) == 2)<|docstring|>ReportSchedule API: Test info security<|endoftext|>
18fc55324cc9418046c391abdfef9eaf3990d82bd7610d3509acdcb16ab4f349
@pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule_not_found(self): '\n ReportSchedule Api: Test get report schedule not found\n ' max_id = db.session.query(func.max(ReportSchedule.id)).scalar() self.login(username='admin') uri = f'api/v1/report/{(max_id + 1)}' rv = self.get_assert_metric(uri, 'get') assert (rv.status_code == 404)
ReportSchedule Api: Test get report schedule not found
tests/integration_tests/reports/api_tests.py
test_get_report_schedule_not_found
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule_not_found(self): '\n \n ' max_id = db.session.query(func.max(ReportSchedule.id)).scalar() self.login(username='admin') uri = f'api/v1/report/{(max_id + 1)}' rv = self.get_assert_metric(uri, 'get') assert (rv.status_code == 404)
@pytest.mark.usefixtures('create_report_schedules') def test_get_report_schedule_not_found(self): '\n \n ' max_id = db.session.query(func.max(ReportSchedule.id)).scalar() self.login(username='admin') uri = f'api/v1/report/{(max_id + 1)}' rv = self.get_assert_metric(uri, 'get') assert (rv.status_code == 404)<|docstring|>ReportSchedule Api: Test get report schedule not found<|endoftext|>
4eb2b32cd95b443f4953a23e299060c5542dae7af26f7184431b1902efebf78e
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule(self): '\n ReportSchedule Api: Test get list report schedules\n ' self.login(username='admin') uri = f'api/v1/report/' rv = self.get_assert_metric(uri, 'get_list') expected_fields = ['active', 'changed_by', 'changed_on', 'changed_on_delta_humanized', 'chart_id', 'created_by', 'created_on', 'creation_method', 'crontab', 'crontab_humanized', 'dashboard_id', 'description', 'id', 'last_eval_dttm', 'last_state', 'name', 'owners', 'recipients', 'timezone', 'type'] assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT) data_keys = sorted(list(data['result'][0].keys())) assert (expected_fields == data_keys) expected_owners_fields = ['first_name', 'id', 'last_name'] data_keys = sorted(list(data['result'][0]['owners'][0].keys())) assert (expected_owners_fields == data_keys) expected_recipients_fields = ['id', 'type'] data_keys = sorted(list(data['result'][1]['recipients'][0].keys())) assert (expected_recipients_fields == data_keys)
ReportSchedule Api: Test get list report schedules
tests/integration_tests/reports/api_tests.py
test_get_list_report_schedule
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule(self): '\n \n ' self.login(username='admin') uri = f'api/v1/report/' rv = self.get_assert_metric(uri, 'get_list') expected_fields = ['active', 'changed_by', 'changed_on', 'changed_on_delta_humanized', 'chart_id', 'created_by', 'created_on', 'creation_method', 'crontab', 'crontab_humanized', 'dashboard_id', 'description', 'id', 'last_eval_dttm', 'last_state', 'name', 'owners', 'recipients', 'timezone', 'type'] assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT) data_keys = sorted(list(data['result'][0].keys())) assert (expected_fields == data_keys) expected_owners_fields = ['first_name', 'id', 'last_name'] data_keys = sorted(list(data['result'][0]['owners'][0].keys())) assert (expected_owners_fields == data_keys) expected_recipients_fields = ['id', 'type'] data_keys = sorted(list(data['result'][1]['recipients'][0].keys())) assert (expected_recipients_fields == data_keys)
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule(self): '\n \n ' self.login(username='admin') uri = f'api/v1/report/' rv = self.get_assert_metric(uri, 'get_list') expected_fields = ['active', 'changed_by', 'changed_on', 'changed_on_delta_humanized', 'chart_id', 'created_by', 'created_on', 'creation_method', 'crontab', 'crontab_humanized', 'dashboard_id', 'description', 'id', 'last_eval_dttm', 'last_state', 'name', 'owners', 'recipients', 'timezone', 'type'] assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT) data_keys = sorted(list(data['result'][0].keys())) assert (expected_fields == data_keys) expected_owners_fields = ['first_name', 'id', 'last_name'] data_keys = sorted(list(data['result'][0]['owners'][0].keys())) assert (expected_owners_fields == data_keys) expected_recipients_fields = ['id', 'type'] data_keys = sorted(list(data['result'][1]['recipients'][0].keys())) assert (expected_recipients_fields == data_keys)<|docstring|>ReportSchedule Api: Test get list report schedules<|endoftext|>
07dd9c4b5e398cea0eb2c1ca7e954a9320dd0a19f4cf6a18c083b74d06b33e59
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_sorting(self): '\n ReportSchedule Api: Test sorting on get list report schedules\n ' self.login(username='admin') uri = 'api/v1/report/' order_columns = ['active', 'created_by.first_name', 'changed_by.first_name', 'changed_on', 'changed_on_delta_humanized', 'created_on', 'crontab', 'description', 'last_eval_dttm', 'name', 'type', 'crontab_humanized'] for order_column in order_columns: arguments = {'order_column': order_column, 'order_direction': 'asc'} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200)
ReportSchedule Api: Test sorting on get list report schedules
tests/integration_tests/reports/api_tests.py
test_get_list_report_schedule_sorting
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_sorting(self): '\n \n ' self.login(username='admin') uri = 'api/v1/report/' order_columns = ['active', 'created_by.first_name', 'changed_by.first_name', 'changed_on', 'changed_on_delta_humanized', 'created_on', 'crontab', 'description', 'last_eval_dttm', 'name', 'type', 'crontab_humanized'] for order_column in order_columns: arguments = {'order_column': order_column, 'order_direction': 'asc'} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200)
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_sorting(self): '\n \n ' self.login(username='admin') uri = 'api/v1/report/' order_columns = ['active', 'created_by.first_name', 'changed_by.first_name', 'changed_on', 'changed_on_delta_humanized', 'created_on', 'crontab', 'description', 'last_eval_dttm', 'name', 'type', 'crontab_humanized'] for order_column in order_columns: arguments = {'order_column': order_column, 'order_direction': 'asc'} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200)<|docstring|>ReportSchedule Api: Test sorting on get list report schedules<|endoftext|>
4f416f348a3a0bfc1b4076b5b8b282805ebf7914824a1afbc9594f0a831d4577
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_name(self): '\n ReportSchedule Api: Test filter name on get list report schedules\n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'name', 'opr': 'ct', 'value': '2'}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') expected_result = {'name': 'name2'} assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 1) assert (data['result'][0] == expected_result)
ReportSchedule Api: Test filter name on get list report schedules
tests/integration_tests/reports/api_tests.py
test_get_list_report_schedule_filter_name
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_name(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'name', 'opr': 'ct', 'value': '2'}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') expected_result = {'name': 'name2'} assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 1) assert (data['result'][0] == expected_result)
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_name(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'name', 'opr': 'ct', 'value': '2'}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') expected_result = {'name': 'name2'} assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 1) assert (data['result'][0] == expected_result)<|docstring|>ReportSchedule Api: Test filter name on get list report schedules<|endoftext|>
acf7ec0dbd9c41d3c705e2dbf59bdf56b1474206692824f64e39f4768a955859
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_custom(self): '\n ReportSchedule Api: Test custom filter on get list report schedules\n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'name', 'opr': 'report_all_text', 'value': 'table3'}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') expected_result = {'name': 'name3'} assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 1) assert (data['result'][0] == expected_result)
ReportSchedule Api: Test custom filter on get list report schedules
tests/integration_tests/reports/api_tests.py
test_get_list_report_schedule_filter_custom
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_custom(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'name', 'opr': 'report_all_text', 'value': 'table3'}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') expected_result = {'name': 'name3'} assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 1) assert (data['result'][0] == expected_result)
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_custom(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'name', 'opr': 'report_all_text', 'value': 'table3'}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') expected_result = {'name': 'name3'} assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 1) assert (data['result'][0] == expected_result)<|docstring|>ReportSchedule Api: Test custom filter on get list report schedules<|endoftext|>
001725c054ff96f270f96bc7fa4717d1619f48087114fb9bddb785c72ec9eba7
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_active(self): '\n ReportSchedule Api: Test active filter on get list report schedules\n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'active', 'opr': 'eq', 'value': True}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT)
ReportSchedule Api: Test active filter on get list report schedules
tests/integration_tests/reports/api_tests.py
test_get_list_report_schedule_filter_active
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_active(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'active', 'opr': 'eq', 'value': True}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT)
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_active(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'active', 'opr': 'eq', 'value': True}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT)<|docstring|>ReportSchedule Api: Test active filter on get list report schedules<|endoftext|>
9a1c01b6da0886a1660e85bc83f4873ea9aff9243a5c0e749e4398afe9b8f6e8
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_type(self): '\n ReportSchedule Api: Test type filter on get list report schedules\n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'type', 'opr': 'eq', 'value': ReportScheduleType.ALERT}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT) arguments = {'columns': ['name'], 'filters': [{'col': 'type', 'opr': 'eq', 'value': ReportScheduleType.REPORT}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 0)
ReportSchedule Api: Test type filter on get list report schedules
tests/integration_tests/reports/api_tests.py
test_get_list_report_schedule_filter_type
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_type(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'type', 'opr': 'eq', 'value': ReportScheduleType.ALERT}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT) arguments = {'columns': ['name'], 'filters': [{'col': 'type', 'opr': 'eq', 'value': ReportScheduleType.REPORT}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 0)
@pytest.mark.usefixtures('create_report_schedules') def test_get_list_report_schedule_filter_type(self): '\n \n ' self.login(username='admin') arguments = {'columns': ['name'], 'filters': [{'col': 'type', 'opr': 'eq', 'value': ReportScheduleType.ALERT}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == REPORTS_COUNT) arguments = {'columns': ['name'], 'filters': [{'col': 'type', 'opr': 'eq', 'value': ReportScheduleType.REPORT}]} uri = f'api/v1/report/?q={prison.dumps(arguments)}' rv = self.get_assert_metric(uri, 'get_list') assert (rv.status_code == 200) data = json.loads(rv.data.decode('utf-8')) assert (data['count'] == 0)<|docstring|>ReportSchedule Api: Test type filter on get list report schedules<|endoftext|>
3e99aba5e6c425cae320ec0241cf0df05c3c59fb566bd9066135e5eb3dcfab0d
@pytest.mark.usefixtures('create_report_schedules') def test_get_related_report_schedule(self): '\n ReportSchedule Api: Test get releated report schedule\n ' self.login(username='admin') related_columns = ['created_by', 'chart', 'dashboard', 'database'] for related_column in related_columns: uri = f'api/v1/report/related/{related_column}' rv = self.client.get(uri) assert (rv.status_code == 200)
ReportSchedule Api: Test get releated report schedule
tests/integration_tests/reports/api_tests.py
test_get_related_report_schedule
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_get_related_report_schedule(self): '\n \n ' self.login(username='admin') related_columns = ['created_by', 'chart', 'dashboard', 'database'] for related_column in related_columns: uri = f'api/v1/report/related/{related_column}' rv = self.client.get(uri) assert (rv.status_code == 200)
@pytest.mark.usefixtures('create_report_schedules') def test_get_related_report_schedule(self): '\n \n ' self.login(username='admin') related_columns = ['created_by', 'chart', 'dashboard', 'database'] for related_column in related_columns: uri = f'api/v1/report/related/{related_column}' rv = self.client.get(uri) assert (rv.status_code == 200)<|docstring|>ReportSchedule Api: Test get releated report schedule<|endoftext|>
644877590ef9918b88872eb6a21b8e0e46fa05a730bf8e44e2b1cb827f45e9e9
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices') def test_create_report_schedule(self): '\n ReportSchedule Api: Test create report schedule\n ' self.login(username='admin') chart = db.session.query(Slice).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'grace_period': 14400, 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) created_model = db.session.query(ReportSchedule).get(data.get('id')) assert (created_model is not None) assert (created_model.name == report_schedule_data['name']) assert (created_model.grace_period == report_schedule_data['grace_period']) assert (created_model.working_timeout == report_schedule_data['working_timeout']) assert (created_model.description == report_schedule_data['description']) assert (created_model.crontab == report_schedule_data['crontab']) assert (created_model.chart.id == report_schedule_data['chart']) assert (created_model.database.id == report_schedule_data['database']) assert (created_model.creation_method == report_schedule_data['creation_method']) db.session.delete(created_model) db.session.commit()
ReportSchedule Api: Test create report schedule
tests/integration_tests/reports/api_tests.py
test_create_report_schedule
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices') def test_create_report_schedule(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'grace_period': 14400, 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) created_model = db.session.query(ReportSchedule).get(data.get('id')) assert (created_model is not None) assert (created_model.name == report_schedule_data['name']) assert (created_model.grace_period == report_schedule_data['grace_period']) assert (created_model.working_timeout == report_schedule_data['working_timeout']) assert (created_model.description == report_schedule_data['description']) assert (created_model.crontab == report_schedule_data['crontab']) assert (created_model.chart.id == report_schedule_data['chart']) assert (created_model.database.id == report_schedule_data['database']) assert (created_model.creation_method == report_schedule_data['creation_method']) db.session.delete(created_model) db.session.commit()
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices') def test_create_report_schedule(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'grace_period': 14400, 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) created_model = db.session.query(ReportSchedule).get(data.get('id')) assert (created_model is not None) assert (created_model.name == report_schedule_data['name']) assert (created_model.grace_period == report_schedule_data['grace_period']) assert (created_model.working_timeout == report_schedule_data['working_timeout']) assert (created_model.description == report_schedule_data['description']) assert (created_model.crontab == report_schedule_data['crontab']) assert (created_model.chart.id == report_schedule_data['chart']) assert (created_model.database.id == report_schedule_data['database']) assert (created_model.creation_method == report_schedule_data['creation_method']) db.session.delete(created_model) db.session.commit()<|docstring|>ReportSchedule Api: Test create report schedule<|endoftext|>
59d0ea5547f01f35e06b23d6e6c499d1c5601865a5edef50b5d21979bc776bca
@pytest.mark.usefixtures('create_report_schedules') def test_create_report_schedule_uniqueness(self): '\n ReportSchedule Api: Test create report schedule uniqueness\n ' self.login(username='admin') chart = db.session.query(Slice).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 422) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'name': ['An alert named "name3" already exists']}}) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'chart': chart.id} uri = 'api/v1/report/' rv = self.client.post(uri, json=report_schedule_data) assert (rv.status_code == 201) data = json.loads(rv.data.decode('utf-8')) created_model = db.session.query(ReportSchedule).get(data.get('id')) db.session.delete(created_model) db.session.commit()
ReportSchedule Api: Test create report schedule uniqueness
tests/integration_tests/reports/api_tests.py
test_create_report_schedule_uniqueness
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('create_report_schedules') def test_create_report_schedule_uniqueness(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 422) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'name': ['An alert named "name3" already exists']}}) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'chart': chart.id} uri = 'api/v1/report/' rv = self.client.post(uri, json=report_schedule_data) assert (rv.status_code == 201) data = json.loads(rv.data.decode('utf-8')) created_model = db.session.query(ReportSchedule).get(data.get('id')) db.session.delete(created_model) db.session.commit()
@pytest.mark.usefixtures('create_report_schedules') def test_create_report_schedule_uniqueness(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 422) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'name': ['An alert named "name3" already exists']}}) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'chart': chart.id} uri = 'api/v1/report/' rv = self.client.post(uri, json=report_schedule_data) assert (rv.status_code == 201) data = json.loads(rv.data.decode('utf-8')) created_model = db.session.query(ReportSchedule).get(data.get('id')) db.session.delete(created_model) db.session.commit()<|docstring|>ReportSchedule Api: Test create report schedule uniqueness<|endoftext|>
b028e210f2665eb913180d69681779beb206cceec0cd9493c4e690da12784c08
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_report_schedule_schema(self): '\n ReportSchedule Api: Test create report schedule schema check\n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': (- 10), 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': 
ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'grace_period': (- 10), 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new4', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': chart.id, 'dashboard': None, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': None, 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 
'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': None, 'dashboard': dashboard.id, 'database': example_db.id} rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'timezone': ['Field may not be null.']}}) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 'this is not a timezone', 'dashboard': dashboard.id, 'database': example_db.id} rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'timezone': [f"Must be one of: {', '.join(pytz.all_timezones)}."]}}) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new6', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 'America/Los_Angeles', 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (data['result']['timezone'] == 'America/Los_Angeles') assert (rv.status_code == 201)
ReportSchedule Api: Test create report schedule schema check
tests/integration_tests/reports/api_tests.py
test_create_report_schedule_schema
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_report_schedule_schema(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': (- 10), 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': 
[{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'grace_period': (- 10), 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new4', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': chart.id, 'dashboard': None, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': None, 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 
None, 'dashboard': dashboard.id, 'database': example_db.id} rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'timezone': ['Field may not be null.']}}) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 'this is not a timezone', 'dashboard': dashboard.id, 'database': example_db.id} rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'timezone': [f"Must be one of: {', '.join(pytz.all_timezones)}."]}}) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new6', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 'America/Los_Angeles', 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (data['result']['timezone'] == 'America/Los_Angeles') assert (rv.status_code == 201)
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_report_schedule_schema(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': (- 10), 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': 
[{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'grace_period': (- 10), 'working_timeout': 3600, 'chart': chart.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new4', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': chart.id, 'dashboard': None, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'chart': None, 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 
None, 'dashboard': dashboard.id, 'database': example_db.id} rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'timezone': ['Field may not be null.']}}) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new5', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 'this is not a timezone', 'dashboard': dashboard.id, 'database': example_db.id} rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 400) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'timezone': [f"Must be one of: {', '.join(pytz.all_timezones)}."]}}) report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new6', 'description': 'description', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'crontab': '0 9 * * *', 'recipients': [{'type': ReportRecipientType.EMAIL, 'recipient_config_json': {'target': 'example@example.com'}}, {'type': ReportRecipientType.SLACK, 'recipient_config_json': {'target': 'channel'}}], 'working_timeout': 3600, 'timezone': 'America/Los_Angeles', 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (data['result']['timezone'] == 'America/Los_Angeles') assert (rv.status_code == 201)<|docstring|>ReportSchedule Api: Test create report schedule schema check<|endoftext|>
6277081b4d7257df6228db371b948cbc68ff63a1318ba33fc9eb3cb03d7cdb7c
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_unsaved_report_schedule_schema(self): '\n ReportSchedule Api: Test create report schedule with unsaved chart\n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'chart': 0} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 422) assert (data['message']['chart'] == 'Please save your chart first, then try creating a new email report.')
ReportSchedule Api: Test create report schedule with unsaved chart
tests/integration_tests/reports/api_tests.py
test_unsaved_report_schedule_schema
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_unsaved_report_schedule_schema(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'chart': 0} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 422) assert (data['message']['chart'] == 'Please save your chart first, then try creating a new email report.')
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_unsaved_report_schedule_schema(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'chart': 0} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 422) assert (data['message']['chart'] == 'Please save your chart first, then try creating a new email report.')<|docstring|>ReportSchedule Api: Test create report schedule with unsaved chart<|endoftext|>
48e17eb16276f3b8d85fe67038576e33089408f075be7f2f613b63d4c51b7445
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_no_dashboard_report_schedule_schema(self): '\n ReportSchedule Api: Test create report schedule with no dashboard id\n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *'} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 422) assert (data['message']['dashboard'] == 'Please save your dashboard first, then try creating a new email report.')
ReportSchedule Api: Test create report schedule with no dashboard id
tests/integration_tests/reports/api_tests.py
test_no_dashboard_report_schedule_schema
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_no_dashboard_report_schedule_schema(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *'} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 422) assert (data['message']['dashboard'] == 'Please save your dashboard first, then try creating a new email report.')
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_no_dashboard_report_schedule_schema(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name3', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *'} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 422) assert (data['message']['dashboard'] == 'Please save your dashboard first, then try creating a new email report.')<|docstring|>ReportSchedule Api: Test create report schedule with no dashboard id<|endoftext|>
60e351f37cf30e7e208a7b044403691354c516b1e012ed224554ab76881f2966
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_multiple_creation_method_report_schedule_charts(self): '\n ReportSchedule Api: Test create multiple reports with the same creation method\n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name4', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'chart': chart.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name5', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'chart': chart.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 409) assert (data == {'errors': [{'message': 'Resource already has an attached report.', 'error_type': 'GENERIC_COMMAND_ERROR', 'level': 'warning', 'extra': {'issue_codes': [{'code': 1010, 'message': 'Issue 1010 - Superset encountered an error while running a command.'}]}}]})
ReportSchedule Api: Test create multiple reports with the same creation method
tests/integration_tests/reports/api_tests.py
test_create_multiple_creation_method_report_schedule_charts
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_multiple_creation_method_report_schedule_charts(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name4', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'chart': chart.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name5', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'chart': chart.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 409) assert (data == {'errors': [{'message': 'Resource already has an attached report.', 'error_type': 'GENERIC_COMMAND_ERROR', 'level': 'warning', 'extra': {'issue_codes': [{'code': 1010, 'message': 'Issue 1010 - Superset encountered an error while running a command.'}]}}]})
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_multiple_creation_method_report_schedule_charts(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name4', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'chart': chart.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name5', 'description': 'description', 'creation_method': ReportCreationMethod.CHARTS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'chart': chart.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 409) assert (data == {'errors': [{'message': 'Resource already has an attached report.', 'error_type': 'GENERIC_COMMAND_ERROR', 'level': 'warning', 'extra': {'issue_codes': [{'code': 1010, 'message': 'Issue 1010 - Superset encountered an error while running a command.'}]}}]})<|docstring|>ReportSchedule Api: Test create multiple reports with the same creation method<|endoftext|>
511a33f2a59390a2922f1658ad64e0232d02eb1e2cfa26f3968411f282fa2024
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_multiple_creation_method_report_schedule_dashboards(self): '\n ReportSchedule Api: Test create multiple reports with the same creation method\n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name4', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'dashboard': dashboard.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name5', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'dashboard': dashboard.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 409) assert (data == {'errors': [{'message': 'Resource already has an attached report.', 'error_type': 'GENERIC_COMMAND_ERROR', 'level': 'warning', 'extra': {'issue_codes': [{'code': 1010, 'message': 'Issue 1010 - Superset encountered an error while running a command.'}]}}]})
ReportSchedule Api: Test create multiple reports with the same creation method
tests/integration_tests/reports/api_tests.py
test_create_multiple_creation_method_report_schedule_dashboards
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_multiple_creation_method_report_schedule_dashboards(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name4', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'dashboard': dashboard.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name5', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'dashboard': dashboard.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 409) assert (data == {'errors': [{'message': 'Resource already has an attached report.', 'error_type': 'GENERIC_COMMAND_ERROR', 'level': 'warning', 'extra': {'issue_codes': [{'code': 1010, 'message': 'Issue 1010 - Superset encountered an error while running a command.'}]}}]})
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices', 'create_report_schedules') def test_create_multiple_creation_method_report_schedule_dashboards(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name4', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'dashboard': dashboard.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 201) report_schedule_data = {'type': ReportScheduleType.REPORT, 'name': 'name5', 'description': 'description', 'creation_method': ReportCreationMethod.DASHBOARDS, 'crontab': '0 9 * * *', 'working_timeout': 3600, 'dashboard': dashboard.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') data = json.loads(rv.data.decode('utf-8')) assert (rv.status_code == 409) assert (data == {'errors': [{'message': 'Resource already has an attached report.', 'error_type': 'GENERIC_COMMAND_ERROR', 'level': 'warning', 'extra': {'issue_codes': [{'code': 1010, 'message': 'Issue 1010 - Superset encountered an error while running a command.'}]}}]})<|docstring|>ReportSchedule Api: Test create multiple reports with the same creation method<|endoftext|>
1251472d87687708eb701cdb3ca56a2c8b65356d984d28ad356aff22d022a4a0
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices') def test_create_report_schedule_chart_dash_validation(self): '\n ReportSchedule Api: Test create report schedule chart and dashboard validation\n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'chart': chart.id, 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 422) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'chart': 'Choose a chart or dashboard not both'}})
ReportSchedule Api: Test create report schedule chart and dashboard validation
tests/integration_tests/reports/api_tests.py
test_create_report_schedule_chart_dash_validation
CodeingBoy/superset
2
python
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices') def test_create_report_schedule_chart_dash_validation(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'chart': chart.id, 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 422) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'chart': 'Choose a chart or dashboard not both'}})
@pytest.mark.usefixtures('load_birth_names_dashboard_with_slices') def test_create_report_schedule_chart_dash_validation(self): '\n \n ' self.login(username='admin') chart = db.session.query(Slice).first() dashboard = db.session.query(Dashboard).first() example_db = get_example_database() report_schedule_data = {'type': ReportScheduleType.ALERT, 'name': 'new3', 'description': 'description', 'crontab': '0 9 * * *', 'creation_method': ReportCreationMethod.ALERTS_REPORTS, 'chart': chart.id, 'dashboard': dashboard.id, 'database': example_db.id} uri = 'api/v1/report/' rv = self.post_assert_metric(uri, report_schedule_data, 'post') assert (rv.status_code == 422) data = json.loads(rv.data.decode('utf-8')) assert (data == {'message': {'chart': 'Choose a chart or dashboard not both'}})<|docstring|>ReportSchedule Api: Test create report schedule chart and dashboard validation<|endoftext|>