code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
def fetch_dset_dirs(dset_name=None):
"""
Finds the global pathname to a list of directories which represent a
dataset by name.
"""
assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'
dset_name = 'default' if dset_name is None else dset_name
home = os.path.expanduser('~')
return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])
<|reserved_special_token_0|>
def split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,
seed=None):
if seed is not None:
random.seed(seed)
total = train_split + val_split + test_split
train_split = train_split / total
val_split = val_split / total
test_split = test_split / total
n_train = round(train_split * len(filenames))
n_val = round(val_split * len(filenames))
permutation = random.sample(filenames, len(filenames))
train_files = permutation[:n_train]
val_files = permutation[n_train:n_train + n_val]
test_files = permutation[n_train + n_val:]
return train_files, val_files, test_files
<|reserved_special_token_0|>
def save_samples(samples, output_prefix='sample'):
"""Saves a list of samples to ply files (with h5 labels)"""
for i, vertices in enumerate(samples):
vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)
if os.path.dirname(vertex_fname) == '':
vertex_fname = './' + vertex_fname
mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fetch_dset_dirs(dset_name=None):
"""
Finds the global pathname to a list of directories which represent a
dataset by name.
"""
assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'
dset_name = 'default' if dset_name is None else dset_name
home = os.path.expanduser('~')
return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])
def files_from_dir(dirname, exts=['obj', 'h5']):
"""
Searches a directory for a set of extensions and returns the files
matching those extensions, sorted by basename
"""
filenames = list()
for ext in exts:
ext_expr = os.path.join(dirname, f'*.{ext}')
filenames.extend(glob.glob(ext_expr))
return sorted(filenames, key=os.path.basename)
def split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,
seed=None):
if seed is not None:
random.seed(seed)
total = train_split + val_split + test_split
train_split = train_split / total
val_split = val_split / total
test_split = test_split / total
n_train = round(train_split * len(filenames))
n_val = round(val_split * len(filenames))
permutation = random.sample(filenames, len(filenames))
train_files = permutation[:n_train]
val_files = permutation[n_train:n_train + n_val]
test_files = permutation[n_train + n_val:]
return train_files, val_files, test_files
<|reserved_special_token_0|>
def save_samples(samples, output_prefix='sample'):
"""Saves a list of samples to ply files (with h5 labels)"""
for i, vertices in enumerate(samples):
vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)
if os.path.dirname(vertex_fname) == '':
vertex_fname = './' + vertex_fname
mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)
<|reserved_special_token_1|>
__doc__ = """
Dataset Module Utilities - mostly for handling files and datasets
"""
<|reserved_special_token_0|>
SVEN_BASE = 'seungmount/research/svenmd'
NICK_BASE = 'seungmount/research/Nick/'
BOTH_BASE = 'seungmount/research/nick_and_sven'
DATASET_DIRS = {'orig_full_cells': [
f'{SVEN_BASE}/pointnet_axoness_gt_180223/'], 'soma_vs_rest': [
f'{SVEN_BASE}/pointnet_soma_masked_180401'], 'orphans': [
f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',
f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/'], 'orphans2': [
f'{NICK_BASE}/pointnet/orphan_dataset/train_val_axons',
f'{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/'],
'orphan_axons': [f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/'],
'orphan_axons_refined': [
f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308_refined/'],
'pinky100_orphan_dends': [f'{BOTH_BASE}/data/180920_orphan_dends/'],
'orphan_axons_pinky100': [f'{SVEN_BASE}/InhAnalysis/meshes_put_axon/'],
'fish_refined': [f'{SVEN_BASE}/180831_meshes_ashwin_refined/'],
'full_cells_unrefined': [
f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401'],
'full_cells_refined': [
f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401_refined/'],
'pinky100_orphan_dend_features': [
f'{BOTH_BASE}/nick_archive/p100_dend_outer/inference/proj32/'],
'pinky100_orphan_dend_features_32': [
f'{BOTH_BASE}/nick_archive/p100_dend_outer_32/inference/'], 'default':
[f'{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/',
f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',
f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/']}
def fetch_dset_dirs(dset_name=None):
"""
Finds the global pathname to a list of directories which represent a
dataset by name.
"""
assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'
dset_name = 'default' if dset_name is None else dset_name
home = os.path.expanduser('~')
return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])
def files_from_dir(dirname, exts=['obj', 'h5']):
"""
Searches a directory for a set of extensions and returns the files
matching those extensions, sorted by basename
"""
filenames = list()
for ext in exts:
ext_expr = os.path.join(dirname, f'*.{ext}')
filenames.extend(glob.glob(ext_expr))
return sorted(filenames, key=os.path.basename)
def split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,
seed=None):
if seed is not None:
random.seed(seed)
total = train_split + val_split + test_split
train_split = train_split / total
val_split = val_split / total
test_split = test_split / total
n_train = round(train_split * len(filenames))
n_val = round(val_split * len(filenames))
permutation = random.sample(filenames, len(filenames))
train_files = permutation[:n_train]
val_files = permutation[n_train:n_train + n_val]
test_files = permutation[n_train + n_val:]
return train_files, val_files, test_files
def pull_n_samples(dset, n):
"""Pulls n random samples from a dataset object"""
return list(dset[i] for i in random.sample(range(len(dset)), n))
def save_samples(samples, output_prefix='sample'):
"""Saves a list of samples to ply files (with h5 labels)"""
for i, vertices in enumerate(samples):
vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)
if os.path.dirname(vertex_fname) == '':
vertex_fname = './' + vertex_fname
mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)
<|reserved_special_token_1|>
__doc__ = """
Dataset Module Utilities - mostly for handling files and datasets
"""
import glob
import os
import random
from meshparty import mesh_io
SVEN_BASE = 'seungmount/research/svenmd'
NICK_BASE = 'seungmount/research/Nick/'
BOTH_BASE = 'seungmount/research/nick_and_sven'
DATASET_DIRS = {'orig_full_cells': [
f'{SVEN_BASE}/pointnet_axoness_gt_180223/'], 'soma_vs_rest': [
f'{SVEN_BASE}/pointnet_soma_masked_180401'], 'orphans': [
f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',
f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/'], 'orphans2': [
f'{NICK_BASE}/pointnet/orphan_dataset/train_val_axons',
f'{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/'],
'orphan_axons': [f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/'],
'orphan_axons_refined': [
f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308_refined/'],
'pinky100_orphan_dends': [f'{BOTH_BASE}/data/180920_orphan_dends/'],
'orphan_axons_pinky100': [f'{SVEN_BASE}/InhAnalysis/meshes_put_axon/'],
'fish_refined': [f'{SVEN_BASE}/180831_meshes_ashwin_refined/'],
'full_cells_unrefined': [
f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401'],
'full_cells_refined': [
f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401_refined/'],
'pinky100_orphan_dend_features': [
f'{BOTH_BASE}/nick_archive/p100_dend_outer/inference/proj32/'],
'pinky100_orphan_dend_features_32': [
f'{BOTH_BASE}/nick_archive/p100_dend_outer_32/inference/'], 'default':
[f'{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/',
f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',
f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/']}
def fetch_dset_dirs(dset_name=None):
"""
Finds the global pathname to a list of directories which represent a
dataset by name.
"""
assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'
dset_name = 'default' if dset_name is None else dset_name
home = os.path.expanduser('~')
return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])
def files_from_dir(dirname, exts=['obj', 'h5']):
"""
Searches a directory for a set of extensions and returns the files
matching those extensions, sorted by basename
"""
filenames = list()
for ext in exts:
ext_expr = os.path.join(dirname, f'*.{ext}')
filenames.extend(glob.glob(ext_expr))
return sorted(filenames, key=os.path.basename)
def split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,
seed=None):
if seed is not None:
random.seed(seed)
total = train_split + val_split + test_split
train_split = train_split / total
val_split = val_split / total
test_split = test_split / total
n_train = round(train_split * len(filenames))
n_val = round(val_split * len(filenames))
permutation = random.sample(filenames, len(filenames))
train_files = permutation[:n_train]
val_files = permutation[n_train:n_train + n_val]
test_files = permutation[n_train + n_val:]
return train_files, val_files, test_files
def pull_n_samples(dset, n):
"""Pulls n random samples from a dataset object"""
return list(dset[i] for i in random.sample(range(len(dset)), n))
def save_samples(samples, output_prefix='sample'):
"""Saves a list of samples to ply files (with h5 labels)"""
for i, vertices in enumerate(samples):
vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)
if os.path.dirname(vertex_fname) == '':
vertex_fname = './' + vertex_fname
mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)
<|reserved_special_token_1|>
__doc__ = """
Dataset Module Utilities - mostly for handling files and datasets
"""
import glob
import os
import random
from meshparty import mesh_io
# Datasets -----------------------
SVEN_BASE = "seungmount/research/svenmd"
NICK_BASE = "seungmount/research/Nick/"
BOTH_BASE = "seungmount/research/nick_and_sven"
DATASET_DIRS = {
"orig_full_cells": [f"{SVEN_BASE}/pointnet_axoness_gt_180223/"],
"soma_vs_rest": [f"{SVEN_BASE}/pointnet_soma_masked_180401"],
"orphans": [f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/",
f"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/"],
"orphans2": [f"{NICK_BASE}/pointnet/orphan_dataset/train_val_axons",
f"{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/"],
"orphan_axons": [f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/"],
"orphan_axons_refined": [(f"{SVEN_BASE}"
"/pointnet_orphan_axons_gt_180308_refined/")],
"pinky100_orphan_dends": [(f"{BOTH_BASE}/data/180920_orphan_dends/")],
"orphan_axons_pinky100": [(f"{SVEN_BASE}/InhAnalysis/meshes_put_axon/")],
"fish_refined": [f"{SVEN_BASE}/180831_meshes_ashwin_refined/"],
"full_cells_unrefined": [(f"{SVEN_BASE}"
"/pointnet_full_semantic_labels"
"_masked_180401")],
"full_cells_refined": [(f"{SVEN_BASE}"
"/pointnet_full_semantic_labels"
"_masked_180401_refined/")],
"pinky100_orphan_dend_features": [(f"{BOTH_BASE}"
"/nick_archive/p100_dend_outer"
"/inference/proj32/")],
"pinky100_orphan_dend_features_32": [(f"{BOTH_BASE}"
"/nick_archive/p100_dend_outer_32"
"/inference/")],
"default": [f"{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/",
f"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/",
f"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/"]
}
# --------------------------------
def fetch_dset_dirs(dset_name=None):
    """
    Return the absolute paths of the directories that make up a dataset.

    Args:
        dset_name: key into DATASET_DIRS, or None to use the "default"
            dataset.

    Returns:
        list[str]: each configured directory joined onto the user's home dir.

    Raises:
        AssertionError: if dset_name is neither None nor a known dataset
            name. NOTE(review): asserts are stripped under `python -O`;
            kept as-is for backward compatibility with existing callers.
    """
    assert (dset_name is None) or (dset_name in DATASET_DIRS), "invalid name"

    dset_name = "default" if dset_name is None else dset_name

    home = os.path.expanduser("~")

    # List comprehension instead of list(<genexp>) (ruff C400).
    return [os.path.join(home, d) for d in DATASET_DIRS[dset_name]]
def files_from_dir(dirname, exts=("obj", "h5")):
    """
    Search a directory (non-recursively) for files with the given
    extensions and return them sorted by basename.

    Args:
        dirname: directory to search.
        exts: iterable of extension strings, without the leading dot.
            Tuple default replaces the original mutable list default
            (shared-mutable-default pitfall); callers passing their own
            list still work since exts is only iterated.

    Returns:
        list[str]: matching paths, sorted by os.path.basename.
    """
    filenames = []
    for ext in exts:
        ext_expr = os.path.join(dirname, f"*.{ext}")
        filenames.extend(glob.glob(ext_expr))

    return sorted(filenames, key=os.path.basename)
def split_files(filenames, train_split=0.8,
                val_split=0.1, test_split=0.1, seed=None):
    """
    Shuffle filenames and partition them into train/val/test lists.

    The split weights are normalized, so any non-negative weights with a
    positive sum work (e.g. 8/1/1).

    Args:
        filenames: sequence of items to split.
        train_split, val_split, test_split: relative split weights.
        seed: optional seed passed to the global random module for
            reproducible shuffles.

    Returns:
        (train_files, val_files, test_files) tuple of lists covering all
        of filenames.

    Raises:
        ValueError: if the weights sum to zero or less (previously this
            surfaced as an opaque ZeroDivisionError).
    """
    if seed is not None:
        random.seed(seed)

    # Normalizing splits for arbitrary values
    total = train_split + val_split + test_split
    if total <= 0:
        raise ValueError("split weights must sum to a positive value")

    train_split = train_split / total
    val_split = val_split / total
    # Test fraction is implicit: it takes whatever remains after the
    # train and val slices, so it needs no normalized value of its own.

    n_train = round(train_split * len(filenames))
    n_val = round(val_split * len(filenames))

    permutation = random.sample(filenames, len(filenames))

    train_files = permutation[:n_train]
    val_files = permutation[n_train:(n_train + n_val)]
    test_files = permutation[(n_train + n_val):]

    return train_files, val_files, test_files
# Helper functions for testing (e.g. sample.py)
def pull_n_samples(dset, n):
    """Return n distinct samples drawn at random from a dataset object."""
    indices = random.sample(range(len(dset)), n)
    return [dset[i] for i in indices]
def save_samples(samples, output_prefix="sample"):
    """Saves a list of samples to ply files (with h5 labels)"""
    # Writes one '<prefix><i>_vertices.ply' file per sample, indexed by
    # position in the input list.
    for (i, vertices) in enumerate(samples):
        vertex_fname = "{pref}{i}_vertices.ply".format(pref=output_prefix, i=i)
        # Bare filenames get an explicit './' so the path always carries a
        # directory component.
        if os.path.dirname(vertex_fname) == "":
            vertex_fname = "./" + vertex_fname
        # NOTE(review): write_vertices_ply is invoked unbound with None as
        # `self` — presumably it ignores the instance; confirm in mesh_io.
        mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)
|
flexible
|
{
"blob_id": "fd0db093b72dad4657d71788405fcca4ba55daff",
"index": 8529,
"step-1": "<mask token>\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\n<mask token>\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\n<mask token>\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n",
"step-2": "<mask token>\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\ndef files_from_dir(dirname, exts=['obj', 'h5']):\n \"\"\"\n Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f'*.{ext}')\n filenames.extend(glob.glob(ext_expr))\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\n<mask token>\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n",
"step-3": "__doc__ = \"\"\"\nDataset Module Utilities - mostly for handling files and datasets\n\"\"\"\n<mask token>\nSVEN_BASE = 'seungmount/research/svenmd'\nNICK_BASE = 'seungmount/research/Nick/'\nBOTH_BASE = 'seungmount/research/nick_and_sven'\nDATASET_DIRS = {'orig_full_cells': [\n f'{SVEN_BASE}/pointnet_axoness_gt_180223/'], 'soma_vs_rest': [\n f'{SVEN_BASE}/pointnet_soma_masked_180401'], 'orphans': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/'], 'orphans2': [\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_axons',\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/'],\n 'orphan_axons': [f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/'],\n 'orphan_axons_refined': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308_refined/'],\n 'pinky100_orphan_dends': [f'{BOTH_BASE}/data/180920_orphan_dends/'],\n 'orphan_axons_pinky100': [f'{SVEN_BASE}/InhAnalysis/meshes_put_axon/'],\n 'fish_refined': [f'{SVEN_BASE}/180831_meshes_ashwin_refined/'],\n 'full_cells_unrefined': [\n f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401'],\n 'full_cells_refined': [\n f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401_refined/'],\n 'pinky100_orphan_dend_features': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer/inference/proj32/'],\n 'pinky100_orphan_dend_features_32': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer_32/inference/'], 'default':\n [f'{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/',\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/']}\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\ndef 
files_from_dir(dirname, exts=['obj', 'h5']):\n \"\"\"\n Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f'*.{ext}')\n filenames.extend(glob.glob(ext_expr))\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\ndef pull_n_samples(dset, n):\n \"\"\"Pulls n random samples from a dataset object\"\"\"\n return list(dset[i] for i in random.sample(range(len(dset)), n))\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n",
"step-4": "__doc__ = \"\"\"\nDataset Module Utilities - mostly for handling files and datasets\n\"\"\"\nimport glob\nimport os\nimport random\nfrom meshparty import mesh_io\nSVEN_BASE = 'seungmount/research/svenmd'\nNICK_BASE = 'seungmount/research/Nick/'\nBOTH_BASE = 'seungmount/research/nick_and_sven'\nDATASET_DIRS = {'orig_full_cells': [\n f'{SVEN_BASE}/pointnet_axoness_gt_180223/'], 'soma_vs_rest': [\n f'{SVEN_BASE}/pointnet_soma_masked_180401'], 'orphans': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/'], 'orphans2': [\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_axons',\n f'{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/'],\n 'orphan_axons': [f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/'],\n 'orphan_axons_refined': [\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308_refined/'],\n 'pinky100_orphan_dends': [f'{BOTH_BASE}/data/180920_orphan_dends/'],\n 'orphan_axons_pinky100': [f'{SVEN_BASE}/InhAnalysis/meshes_put_axon/'],\n 'fish_refined': [f'{SVEN_BASE}/180831_meshes_ashwin_refined/'],\n 'full_cells_unrefined': [\n f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401'],\n 'full_cells_refined': [\n f'{SVEN_BASE}/pointnet_full_semantic_labels_masked_180401_refined/'],\n 'pinky100_orphan_dend_features': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer/inference/proj32/'],\n 'pinky100_orphan_dend_features_32': [\n f'{BOTH_BASE}/nick_archive/p100_dend_outer_32/inference/'], 'default':\n [f'{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/',\n f'{SVEN_BASE}/pointnet_orphan_axons_gt_180308/',\n f'{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/']}\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which represent a\n dataset by name.\n \"\"\"\n assert dset_name is None or dset_name in DATASET_DIRS, 'invalid name'\n dset_name = 'default' if dset_name is None else dset_name\n home = os.path.expanduser('~')\n return list(os.path.join(home, 
d) for d in DATASET_DIRS[dset_name])\n\n\ndef files_from_dir(dirname, exts=['obj', 'h5']):\n \"\"\"\n Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f'*.{ext}')\n filenames.extend(glob.glob(ext_expr))\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8, val_split=0.1, test_split=0.1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n total = train_split + val_split + test_split\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n permutation = random.sample(filenames, len(filenames))\n train_files = permutation[:n_train]\n val_files = permutation[n_train:n_train + n_val]\n test_files = permutation[n_train + n_val:]\n return train_files, val_files, test_files\n\n\ndef pull_n_samples(dset, n):\n \"\"\"Pulls n random samples from a dataset object\"\"\"\n return list(dset[i] for i in random.sample(range(len(dset)), n))\n\n\ndef save_samples(samples, output_prefix='sample'):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n for i, vertices in enumerate(samples):\n vertex_fname = '{pref}{i}_vertices.ply'.format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == '':\n vertex_fname = './' + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n",
"step-5": "__doc__ = \"\"\"\nDataset Module Utilities - mostly for handling files and datasets\n\"\"\"\nimport glob\nimport os\nimport random\n\nfrom meshparty import mesh_io\n\n\n# Datasets -----------------------\nSVEN_BASE = \"seungmount/research/svenmd\"\nNICK_BASE = \"seungmount/research/Nick/\"\nBOTH_BASE = \"seungmount/research/nick_and_sven\"\nDATASET_DIRS = {\n \"orig_full_cells\": [f\"{SVEN_BASE}/pointnet_axoness_gt_180223/\"],\n\n \"soma_vs_rest\": [f\"{SVEN_BASE}/pointnet_soma_masked_180401\"],\n\n \"orphans\": [f\"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/\",\n f\"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/\"],\n\n \"orphans2\": [f\"{NICK_BASE}/pointnet/orphan_dataset/train_val_axons\",\n f\"{NICK_BASE}/pointnet/orphan_dataset/train_val_dends/\"],\n\n \"orphan_axons\": [f\"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/\"],\n\n \"orphan_axons_refined\": [(f\"{SVEN_BASE}\"\n \"/pointnet_orphan_axons_gt_180308_refined/\")],\n\n \"pinky100_orphan_dends\": [(f\"{BOTH_BASE}/data/180920_orphan_dends/\")],\n\n \"orphan_axons_pinky100\": [(f\"{SVEN_BASE}/InhAnalysis/meshes_put_axon/\")],\n\n \"fish_refined\": [f\"{SVEN_BASE}/180831_meshes_ashwin_refined/\"],\n\n \"full_cells_unrefined\": [(f\"{SVEN_BASE}\"\n \"/pointnet_full_semantic_labels\"\n \"_masked_180401\")],\n\n \"full_cells_refined\": [(f\"{SVEN_BASE}\"\n \"/pointnet_full_semantic_labels\"\n \"_masked_180401_refined/\")],\n\n \"pinky100_orphan_dend_features\": [(f\"{BOTH_BASE}\"\n \"/nick_archive/p100_dend_outer\"\n \"/inference/proj32/\")],\n\n \"pinky100_orphan_dend_features_32\": [(f\"{BOTH_BASE}\"\n \"/nick_archive/p100_dend_outer_32\"\n \"/inference/\")],\n\n \"default\": [f\"{SVEN_BASE}/pointnet_axoness_gt_rfc_based_masked_180322/\",\n f\"{SVEN_BASE}/pointnet_orphan_axons_gt_180308/\",\n f\"{SVEN_BASE}/pointnet_orphan_dendrites_gt_180308/\"]\n}\n# --------------------------------\n\n\ndef fetch_dset_dirs(dset_name=None):\n \"\"\"\n Finds the global pathname to a list of directories which 
represent a\n dataset by name.\n \"\"\"\n assert (dset_name is None) or (dset_name in DATASET_DIRS), \"invalid name\"\n\n dset_name = \"default\" if dset_name is None else dset_name\n\n home = os.path.expanduser(\"~\")\n\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])\n\n\ndef files_from_dir(dirname, exts=[\"obj\", \"h5\"]):\n \"\"\"\n Searches a directory for a set of extensions and returns the files\n matching those extensions, sorted by basename\n \"\"\"\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f\"*.{ext}\")\n filenames.extend(glob.glob(ext_expr))\n\n return sorted(filenames, key=os.path.basename)\n\n\ndef split_files(filenames, train_split=0.8,\n val_split=0.1, test_split=0.1, seed=None):\n\n if seed is not None:\n random.seed(seed)\n\n # Normalizing splits for arbitrary values\n total = train_split + val_split + test_split\n\n train_split = train_split / total\n val_split = val_split / total\n test_split = test_split / total\n\n n_train = round(train_split * len(filenames))\n n_val = round(val_split * len(filenames))\n\n permutation = random.sample(filenames, len(filenames))\n\n train_files = permutation[:n_train]\n val_files = permutation[n_train:(n_train+n_val)]\n test_files = permutation[(n_train+n_val):]\n\n return train_files, val_files, test_files\n\n\n# Helper functions for testing (e.g. sample.py)\ndef pull_n_samples(dset, n):\n \"\"\"Pulls n random samples from a dataset object\"\"\"\n return list(dset[i] for i in random.sample(range(len(dset)), n))\n\n\ndef save_samples(samples, output_prefix=\"sample\"):\n \"\"\"Saves a list of samples to ply files (with h5 labels)\"\"\"\n\n for (i, vertices) in enumerate(samples):\n vertex_fname = \"{pref}{i}_vertices.ply\".format(pref=output_prefix, i=i)\n if os.path.dirname(vertex_fname) == \"\":\n vertex_fname = \"./\" + vertex_fname\n mesh_io.Mesh.write_vertices_ply(None, vertex_fname, coords=vertices)\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
from concurrent import futures
import time
import math
import logging
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import sys
sys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
import cv2
from PRNet.utils.cv_plot import plot_kpt, plot_vertices
import pymesh
import threading
from Queue import Queue
from tensorflow.python.framework import tensor_util
import numpy as np
import pyaudio
# PyAudio handle and a mono 16-bit output stream for playing received audio.
p = pyaudio.PyAudio()
# NOTE(review): rate=22500 is unusual — 22050 Hz is the conventional rate;
# confirm this matches the sample rate of the audio producer.
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=22500,
                output=True)
# OpenCV text-overlay settings for the subtitle drawn onto each frame.
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (10,350)
fontScale = 1
fontColor = (255,255,255)
lineType = 2
# Cross-thread queues: `subtitles` carries caption strings, `q` carries
# vertex arrays for the display worker.
subtitles = Queue()
q = Queue()
def worker():
    """Display-thread loop: pops vertex arrays off `q` and renders them
    with the current subtitle overlaid, until 'q' is pressed in the window.
    """
    display_subtitle = ""
    while True:
        item = q.get()
        # Blank 480x640 canvas; used directly as the frame when no
        # vertices arrived (item is None).
        image = np.zeros((480, 640))
        if item is not None:
            vertices = item
            show_img = plot_vertices(np.zeros_like(image), vertices)
        else:
            show_img = image
        # Display the resulting frame
        # Pull the newest subtitle if one is waiting; otherwise keep
        # showing the previous one.
        if not subtitles.empty():
            text = subtitles.get()
            subtitles.task_done()
            display_subtitle = text
        cv2.putText(show_img,display_subtitle,
            bottomLeftCornerOfText,
            font,
            fontScale,
            fontColor,
            lineType)
        cv2.imshow('frame',show_img)
        # Press Q on keyboard to stop recording
        # NOTE(review): breaking here skips the q.task_done() below, so a
        # later q.join() can block forever; also cv2 GUI calls from a
        # non-main thread are platform-dependent — confirm on the target OS.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
    # gRPC servicer that routes Predict payloads to local output devices:
    # 3-D vertices -> display queue, raw audio bytes -> speaker stream,
    # text -> subtitle queue.

    def Predict(self, request, context):
        """Predict -- provides access to loaded TensorFlow model.

        Dispatches on which key is present in request.inputs ("vertices",
        "audio", or "subtitle") and always replies with an "OK" message
        tensor regardless of which branch ran.
        """
        global q
        global stream
        if "vertices" in request.inputs:
            print("vertices")
            # Deserialize the TensorProto into an ndarray for the display
            # worker thread.
            vertices = tensor_util.MakeNdarray(request.inputs["vertices"])
            q.put(vertices)
        elif "audio" in request.inputs:
            print('audio')
            # audio = tensor_util.MakeNdarray(request.inputs['audio'])
            print(type(request.inputs['audio'].string_val[0]))
            # Raw PCM bytes are written straight to the PyAudio stream;
            # assumes the producer matches the stream's format/rate.
            audio = request.inputs['audio'].string_val[0]
            # print(request.inputs['audio'])
            stream.write(audio)
        elif "subtitle" in request.inputs:
            print('subtitle')
            subtitles.put(request.inputs['subtitle'].string_val[0])
        dumbresult = predict_pb2.PredictResponse()
        dumbresult.outputs["message"].CopyFrom(tf.make_tensor_proto("OK"))
        return dumbresult
def serve():
    """Start the display worker thread and the gRPC Predict server, then
    block until interrupted; on Ctrl-C, shut resources down in order.
    """
    # Daemon thread so a hard process exit doesn't hang on the display loop.
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
        FakeServer(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    # server.wait_for_termination()
    _ONE_DAY_IN_SECONDS = 60 * 60 * 24
    try:
        # grpc server.start() returns immediately; this sleep loop keeps
        # the main thread alive until KeyboardInterrupt.
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
        stream.stop_stream()
        stream.close()
        p.terminate()
        q.join()  # block until all tasks are done
        subtitles.join()


if __name__ == '__main__':
    logging.basicConfig()
    serve()
|
normal
|
{
"blob_id": "0ec5d6ce11851a577046cf73cf98c91b6dfb9f67",
"index": 1550,
"step-1": "<mask token>\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n<mask token>\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == 
'__main__':\n logging.basicConfig()\n serve()\n",
"step-3": "<mask token>\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n<mask token>\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = 10, 350\nfontScale = 1\nfontColor = 255, 255, 255\nlineType = 2\nsubtitles = Queue()\nq = Queue()\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n 
server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-4": "from concurrent import futures\nimport time\nimport math\nimport logging\nimport grpc\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\nimport sys\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\nimport cv2\nfrom PRNet.utils.cv_plot import plot_kpt, plot_vertices\nimport pymesh\nimport threading\nfrom Queue import Queue\nfrom tensorflow.python.framework import tensor_util\nimport numpy as np\nimport pyaudio\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = 10, 350\nfontScale = 1\nfontColor = 255, 255, 255\nlineType = 2\nsubtitles = Queue()\nq = Queue()\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n 
dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-5": "from concurrent import futures\nimport time\nimport math\nimport logging\n\nimport grpc\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nimport sys\nsys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n\nimport cv2\nfrom PRNet.utils.cv_plot import plot_kpt, plot_vertices\nimport pymesh\nimport threading\nfrom Queue import Queue\nfrom tensorflow.python.framework import tensor_util\nimport numpy as np\n\n\nimport pyaudio\n\np = pyaudio.PyAudio()\n\nstream = p.open(format=pyaudio.paInt16,\n channels=1,\n rate=22500,\n output=True)\n\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = (10,350)\nfontScale = 1\nfontColor = (255,255,255)\nlineType = 2\n\n\nsubtitles = Queue()\n\nq = Queue()\ndef worker():\n display_subtitle = \"\"\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image \n # Display the resulting frame\n\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img,display_subtitle, \n bottomLeftCornerOfText, \n font, \n fontScale,\n fontColor,\n lineType)\n cv2.imshow('frame',show_img)\n\n\n # Press Q on keyboard to stop recording\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n q.task_done()\n\n\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if \"vertices\" in request.inputs:\n print(\"vertices\")\n vertices = tensor_util.MakeNdarray(request.inputs[\"vertices\"])\n q.put(vertices)\n elif \"audio\" in request.inputs:\n print('audio')\n # audio = tensor_util.MakeNdarray(request.inputs['audio'])\n 
print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n # print(request.inputs['audio'])\n stream.write(audio)\n elif \"subtitle\" in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n\n\n\n\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs[\"message\"].CopyFrom(tf.make_tensor_proto(\"OK\"))\n return dumbresult\n\n\n\ndef serve():\n\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n # server.wait_for_termination()\n\n\n\n\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n q.join() # block until all tasks are donet\n subtitles.join()\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def printTest2():
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Top of Start of program ' + thisProgramIs)
print(' ')
return
<|reserved_special_token_0|>
def menuInit(cmdArray):
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('start of menuInit of program ' + thisProgramIs)
print(' ')
return
def main(argv=None):
if argv is None:
argv = sys.argv
lenArgv = len(sys.argv)
pyScriptProgramName = sys.argv[0]
print(' ')
print(
'# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Start of program in Main ' + thisProgramIs)
print(' ')
import curses
import getpass
import os
import shutil
import subprocess
import pprint
from subprocess import Popen, PIPE, STDOUT
import numpy
import pygame
import tkinter
print(' ')
reEntered = input(
'Stop chosen, all RAM data will be lost, are you sure? y or n: ')
if reEntered == 'y' or reEntered == 'Y':
return
else:
print('Staying for more entry. ')
w = 5
h = 99
cmdArrayWidth = w
cmdArrayHeight = h
cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(
cmdArrayHeight)}
menuInit(cmdArray)
out_bytes = ' '
print(('# jcj-jcj-jcj-' + thisProgramIs +
' Function Main is ending with sys.exit(): ', out_bytes))
print(' ')
print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
print(' ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def printTest2():
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Top of Start of program ' + thisProgramIs)
print(' ')
return
def printTest2():
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('printTest2() ' + thisProgramIs)
print(' ')
return
def menuInit(cmdArray):
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('start of menuInit of program ' + thisProgramIs)
print(' ')
return
def main(argv=None):
if argv is None:
argv = sys.argv
lenArgv = len(sys.argv)
pyScriptProgramName = sys.argv[0]
print(' ')
print(
'# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Start of program in Main ' + thisProgramIs)
print(' ')
import curses
import getpass
import os
import shutil
import subprocess
import pprint
from subprocess import Popen, PIPE, STDOUT
import numpy
import pygame
import tkinter
print(' ')
reEntered = input(
'Stop chosen, all RAM data will be lost, are you sure? y or n: ')
if reEntered == 'y' or reEntered == 'Y':
return
else:
print('Staying for more entry. ')
w = 5
h = 99
cmdArrayWidth = w
cmdArrayHeight = h
cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(
cmdArrayHeight)}
menuInit(cmdArray)
out_bytes = ' '
print(('# jcj-jcj-jcj-' + thisProgramIs +
' Function Main is ending with sys.exit(): ', out_bytes))
print(' ')
print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
print(' ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def printTest2():
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Top of Start of program ' + thisProgramIs)
print(' ')
return
def printTest2():
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('printTest2() ' + thisProgramIs)
print(' ')
return
def menuInit(cmdArray):
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('start of menuInit of program ' + thisProgramIs)
print(' ')
return
def main(argv=None):
if argv is None:
argv = sys.argv
lenArgv = len(sys.argv)
pyScriptProgramName = sys.argv[0]
print(' ')
print(
'# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Start of program in Main ' + thisProgramIs)
print(' ')
import curses
import getpass
import os
import shutil
import subprocess
import pprint
from subprocess import Popen, PIPE, STDOUT
import numpy
import pygame
import tkinter
print(' ')
reEntered = input(
'Stop chosen, all RAM data will be lost, are you sure? y or n: ')
if reEntered == 'y' or reEntered == 'Y':
return
else:
print('Staying for more entry. ')
w = 5
h = 99
cmdArrayWidth = w
cmdArrayHeight = h
cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(
cmdArrayHeight)}
menuInit(cmdArray)
out_bytes = ' '
print(('# jcj-jcj-jcj-' + thisProgramIs +
' Function Main is ending with sys.exit(): ', out_bytes))
print(' ')
print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
print(' ')
if __name__ == '__main__':
sys.exit(main())
<|reserved_special_token_1|>
import sys
def printTest2():
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Top of Start of program ' + thisProgramIs)
print(' ')
return
def printTest2():
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('printTest2() ' + thisProgramIs)
print(' ')
return
def menuInit(cmdArray):
if 0 == 0:
print(' ')
print(
'# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('start of menuInit of program ' + thisProgramIs)
print(' ')
return
def main(argv=None):
if argv is None:
argv = sys.argv
lenArgv = len(sys.argv)
pyScriptProgramName = sys.argv[0]
print(' ')
print(
'# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
thisProgramIs = 'menuScrnTxt.py'
print('Start of program in Main ' + thisProgramIs)
print(' ')
import curses
import getpass
import os
import shutil
import subprocess
import pprint
from subprocess import Popen, PIPE, STDOUT
import numpy
import pygame
import tkinter
print(' ')
reEntered = input(
'Stop chosen, all RAM data will be lost, are you sure? y or n: ')
if reEntered == 'y' or reEntered == 'Y':
return
else:
print('Staying for more entry. ')
w = 5
h = 99
cmdArrayWidth = w
cmdArrayHeight = h
cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(
cmdArrayHeight)}
menuInit(cmdArray)
out_bytes = ' '
print(('# jcj-jcj-jcj-' + thisProgramIs +
' Function Main is ending with sys.exit(): ', out_bytes))
print(' ')
print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'
)
print(' ')
if __name__ == '__main__':
sys.exit(main())
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# menuScrnTxt.py
# Created on Mon Mar 8 16:17:50 2021
# @author: jcj52436999
# menuScrnTxt.py-2021-03-08-1641-just noting a general restart in efforts here
import sys
def printTest2():
if 0 == 0 :
print(" ")
print("# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("Top of Start of program " + thisProgramIs))
print(" ")
return
def printTest2():
if 0 == 0 :
print(" ")
print("# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("printTest2() " + thisProgramIs))
print(" ")
return
# import
def menuInit(cmdArray):
if 0 == 0 :
print(" ")
print("# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("start of menuInit of program " + thisProgramIs))
print(" ")
return
# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the Start of main jcj-jcjjcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj
def main(argv=None):
#import sys
if argv is None:
argv = sys.argv
lenArgv = len(sys.argv)
pyScriptProgramName = sys.argv[0]
print(" ")
print("# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("Start of program in Main " + thisProgramIs))
print(" ")
# import sys
import curses
import getpass
import os
import shutil
import subprocess
import pprint
# import pformat
from subprocess import Popen, PIPE, STDOUT
# import urwid
import numpy
import pygame
import tkinter
print (" ")
# Trying to install a favorite set of Ubu software.
#tempHold = tempHold[1]
## print( tempHold )
## cmdArray = " " ;
## cmdArray = menuLineReactions[ tempHold ]();
reEntered = (input( "Stop chosen, all RAM data will be lost, are you sure? y or n: " ))
if reEntered == "y" or reEntered == "Y":
return #sys.exit() sys.exit()
else:
print( "Staying for more entry. ")
#
w = 5
h = 99
cmdArrayWidth = w
cmdArrayHeight = h
cmdArray = {( w, h): " " for w in range(cmdArrayWidth) for h in range(cmdArrayHeight)}
menuInit( cmdArray )
# out_bytes.wait()
out_bytes = " "
print(("# jcj-jcj-jcj-" + thisProgramIs + " Function Main is ending with sys.exit(): ", out_bytes))
print(" ")
print("# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
print(" ")
# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the End of main jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj
if __name__ == "__main__":
sys.exit(main())
# =============================================================================
#
# def main():
# ...
#
# if __name__ == "__main__":
# main()
#
#
# =============================================================================
|
flexible
|
{
"blob_id": "e4f7e0c40edde4aac6ba0a7529a2e028a09689ae",
"index": 7260,
"step-1": "<mask token>\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\n<mask token>\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('printTest2() ' + thisProgramIs)\n print(' ')\n return\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('printTest2() ' + thisProgramIs)\n print(' ')\n return\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import sys\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('printTest2() ' + thisProgramIs)\n print(' ')\n return\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n# menuScrnTxt.py\n# Created on Mon Mar 8 16:17:50 2021\n# @author: jcj52436999\n\n# menuScrnTxt.py-2021-03-08-1641-just noting a general restart in efforts here\n\nimport sys\n\ndef printTest2():\n \n if 0 == 0 :\n print(\" \")\n print(\"# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"Top of Start of program \" + thisProgramIs))\n print(\" \")\n return\n\n\ndef printTest2():\n\n if 0 == 0 :\n print(\" \")\n print(\"# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"printTest2() \" + thisProgramIs))\n print(\" \")\n return\n\n\n# import \n\ndef menuInit(cmdArray):\n\n if 0 == 0 :\n print(\" \")\n print(\"# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"start of menuInit of program \" + thisProgramIs))\n print(\" \") \n \n return\n\n\n# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the Start of main jcj-jcjjcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\ndef main(argv=None):\n #import sys\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv) \n pyScriptProgramName = sys.argv[0]\n\n\n print(\" \")\n print(\"# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"Start of program in Main \" + thisProgramIs))\n print(\" \")\n\n # import sys\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n # import pformat \n \n from subprocess import Popen, PIPE, STDOUT\n\n # import urwid\n import numpy\n import pygame\n import tkinter\n \n print (\" \") \n\n # Trying to install a favorite set of Ubu software.\n \n #tempHold = tempHold[1] \n ## print( tempHold )\n ## cmdArray = \" \" ; \n ## cmdArray = menuLineReactions[ tempHold ](); \n\n reEntered = (input( \"Stop chosen, all RAM data will be 
lost, are you sure? y or n: \" )) \n if reEntered == \"y\" or reEntered == \"Y\":\n return #sys.exit() sys.exit()\n else: \n print( \"Staying for more entry. \")\n \n # \n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h \n cmdArray = {( w, h): \" \" for w in range(cmdArrayWidth) for h in range(cmdArrayHeight)}\n\n menuInit( cmdArray )\n\n # out_bytes.wait() \n out_bytes = \" \" \n print((\"# jcj-jcj-jcj-\" + thisProgramIs + \" Function Main is ending with sys.exit(): \", out_bytes))\n\n print(\" \")\n print(\"# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n print(\" \")\n# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the End of main jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\nif __name__ == \"__main__\":\n sys.exit(main())\n\n\n# =============================================================================\n# \n# def main():\n# ...\n# \n# if __name__ == \"__main__\":\n# main()\n# \n# \n# =============================================================================\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
n = int(input())
a = sorted([int(input()) for _ in range(n)])
x = a[:n//2]
y = a[(n + 1)//2:]
ans = 0
for i in range(len(x)):
ans += abs(x[i] - y[i])
for i in range(1, len(y)):
ans += abs(x[i - 1] - y[i])
if n % 2 == 1:
ans += max(
abs(a[n // 2] - x[-1]),
abs(a[n // 2] - y[0]),
)
print(ans)
|
normal
|
{
"blob_id": "0e9d0927e8d69b0c0fad98479d47f2409c95a751",
"index": 794,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(x)):\n ans += abs(x[i] - y[i])\nfor i in range(1, len(y)):\n ans += abs(x[i - 1] - y[i])\nif n % 2 == 1:\n ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))\nprint(ans)\n",
"step-3": "n = int(input())\na = sorted([int(input()) for _ in range(n)])\nx = a[:n // 2]\ny = a[(n + 1) // 2:]\nans = 0\nfor i in range(len(x)):\n ans += abs(x[i] - y[i])\nfor i in range(1, len(y)):\n ans += abs(x[i - 1] - y[i])\nif n % 2 == 1:\n ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))\nprint(ans)\n",
"step-4": "n = int(input())\na = sorted([int(input()) for _ in range(n)])\n\nx = a[:n//2]\ny = a[(n + 1)//2:]\n\nans = 0\nfor i in range(len(x)):\n ans += abs(x[i] - y[i])\nfor i in range(1, len(y)):\n ans += abs(x[i - 1] - y[i])\nif n % 2 == 1:\n ans += max(\n abs(a[n // 2] - x[-1]),\n abs(a[n // 2] - y[0]),\n )\nprint(ans)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pkg_resources
from twisted.enterprise import adbapi
from twisted.internet import defer
# Start a logger with a namespace for a particular subsystem of our application.
from twisted.logger import Logger
log = Logger("database")
class Database:
    """Asynchronous wrapper around the server's SQLite database.

    Every public method returns a twisted Deferred; the SQL itself runs
    on adbapi's connection-pool threads.  Fatal database problems stop
    the reactor obtained from the supplied context.
    """
    def __init__(self, context, db_filename="database.sqlite"):
        """Open the session database, creating and initialising it if absent.

        context -- dict providing "session_files" (object with a
            session_dir Path) and "reactor" (stopped on fatal errors).
        db_filename -- database filename within the session directory.
        """
        # Get full path and filename for database
        session_files = context["session_files"]
        db_filename = session_files.session_dir / db_filename

        # Note if database already exists
        database_exists = db_filename.is_file()

        # Callback for every connection that is established to the
        # database
        def setup_connection(connection):
            # Turn on foreign key constraints
            cursor = connection.cursor()
            cursor.execute("PRAGMA foreign_keys = ON;")

        # Open a connection to the database.  SQLite will create the
        # file if it doesn't already exist.
        dbpool = adbapi.ConnectionPool(
            "sqlite3",
            db_filename,
            cp_openfun=setup_connection,
            check_same_thread=False  # See https://twistedmatrix.com/trac/ticket/3629
        )

        # If the database did not exist, initialise the database
        if not database_exists:
            log.info("Database requires initialisation")
            self._db_ready = dbpool.runInteraction(self._initialise_database)
            def on_success(data):
                log.info("Database successfully initialised")
                return dbpool
            def on_error(data):
                log.error(f"Failed to initialise the server's database: {data}")
                reactor = context["reactor"]
                reactor.stop()
            self._db_ready.addCallback(on_success)
            self._db_ready.addErrback(on_error)
        else:
            # Database exists already; hand over the pool immediately.
            self._db_ready = defer.succeed(dbpool)

        # Check that database is the correct version
        expected_version = 4
        def check_version(cursor):
            cursor.execute("SELECT version FROM Version")
            row = cursor.fetchone()
            if row is None:
                raise Exception("No version found in Version table of database")
            if row[0] == expected_version:
                log.info(f"Server database version {expected_version}")
                return dbpool
            else:
                reactor = context["reactor"]
                reactor.stop()
                raise Exception(f"Database version ({row[0]}) did not match expected version ({expected_version}). Terminating.")
        def run_check_version(dbpool):
            return dbpool.runInteraction(check_version)
        d = self.get_dbpool()
        d.addCallback(run_check_version)
        def on_error(error):
            log.error("Failed to verify the database: "+str(error))
            reactor = context["reactor"]
            reactor.stop()
        d.addErrback(on_error)

    # Initialise the database structure from instructions in file
    def _initialise_database(self, cursor):
        """Create the schema by executing the packaged database.sql script."""
        log.info("Initialising database")
        initialisation_commands_filename = \
            pkg_resources.resource_filename(
                "singtserver",
                "database.sql"
            )
        # Use a context manager so the file handle is always closed
        # (the original leaked it).
        with open(initialisation_commands_filename, "r") as f:
            initialisation_commands = f.read()
        return cursor.executescript(initialisation_commands)

    def get_dbpool(self):
        """Return a Deferred that fires with the ConnectionPool once the
        database is known to be ready (initialised and version-checked)."""
        d = defer.Deferred()
        def db_ready(db):
            d.callback(db)
            return db
        self._db_ready.addCallback(db_ready)
        return d

    def get_combination(self, track_id=None, take_ids=()):
        """Find an existing combination's id.

        A combination is identified by its backing track (track_id, may
        be None) together with the exact set of takes (take_ids).

        Returns a Deferred that fires with the combination id, or None
        if no matching combination exists.  Raises immediately if
        neither a track id nor any take ids are given.
        """
        # Sanity check arguments
        if (track_id is None
            and len(take_ids) == 0):
            raise Exception(
                "Getting a combination from the database requires "+
                "at least a Track ID or at least one Take ID"
            )

        # Get combination from database.
        # See answers to https://stackoverflow.com/questions/63356820/sql-select-from-many-to-one
        # and https://stackoverflow.com/a/5766293/562930
        def get_combo(cursor):
            if track_id is None:
                assert len(take_ids) > 0
                # BUGFIX: the CASE expression was missing its END
                # keyword and the IN (...) subquery its closing
                # parenthesis, so this statement could never parse.
                sql = (
                    "SELECT id\n"+
                    "FROM Combinations\n"+
                    "WHERE backingTrackId IS NULL\n"+
                    "      AND id IN\n"+
                    "      (SELECT combinationId\n"+
                    "       FROM CombinationsDetail\n"+
                    "       GROUP BY combinationId\n" +
                    "       HAVING SUM(CASE WHEN takeId IN ({seq}) THEN 1 ELSE 0 END) = ?)".format(
                        seq=",".join(["?"]*len(take_ids))
                    )
                )
                cursor.execute(
                    sql,
                    (*take_ids, len(take_ids))
                )
            elif len(take_ids) == 0:
                # Backing track only: the combination must have no
                # detail rows at all.
                sql = (
                    "SELECT id\n"+
                    "FROM Combinations\n"+
                    "WHERE backingTrackId = ?\n"+
                    "      AND NOT EXISTS\n"+
                    "      (SELECT * \n"+
                    "       FROM CombinationsDetail\n"+
                    "       WHERE combinationId = Combinations.id)"
                )
                cursor.execute(
                    sql,
                    (track_id, )
                )
            else:
                sql = ("SELECT id\n"+
                       "FROM Combinations\n"+
                       "WHERE backingTrackId = ?\n"+
                       "      AND id IN\n"+
                       "      (SELECT combinationId\n"+
                       "       FROM CombinationsDetail\n"+
                       "       GROUP BY combinationId\n" +
                       "       HAVING SUM(CASE WHEN takeId IN ({seq}) THEN 1 ELSE 0 END) = ?)").format(
                           seq=",".join(['?']*len(take_ids))
                       )
                cursor.execute(
                    sql,
                    (track_id, *take_ids, len(take_ids))
                )

            # Although there should be at most only one combo id that
            # matches the track and takes specification, even if there
            # are more than one, we'll just return the first (or None
            # if there aren't any).
            row = cursor.fetchone()
            if row is None:
                return None
            combo_id = row[0]
            return combo_id

        def when_ready(dbpool):
            return dbpool.runInteraction(get_combo)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_success(data):
            # BUGFIX: messages previously claimed a combination was
            # being *added*; this method only reads.
            log.info("Successfully retrieved combination from database; combination id: "+str(data))
            return data
        d.addCallback(on_success)

        def on_error(error):
            log.error("Failed to get combination from the database: "+str(error))
            raise Exception("Failed to get combination from the database")
        d.addErrback(on_error)

        return d

    def add_combination(self, track_id=None, take_ids=()):
        """Adds combination into database.

        Returns a Deferred that fires with the new combination's id.
        Raises immediately if neither a track id nor any take ids are
        given.
        """
        log.info(f"Adding combination to database with track id = {track_id} and take_ids = {take_ids}")
        # Sanity check arguments
        if (track_id is None
            and len(take_ids) == 0):
            raise Exception(
                "Adding a combination to the database requires "+
                "at least a Track ID or at least one Take ID"
            )

        # Create combination in database
        def add_combo(cursor):
            # Create audio id
            cursor.execute("INSERT INTO AudioIdentifiers DEFAULT VALUES")
            audio_id = cursor.lastrowid

            cursor.execute(
                "INSERT INTO Combinations (audioId, backingTrackId) VALUES (?, ?)",
                (audio_id, track_id)
            )
            combo_id = cursor.lastrowid

            # One detail row per take in the combination.
            for take_id in take_ids:
                cursor.execute(
                    "INSERT INTO CombinationsDetail (combinationId, takeId) "+
                    "VALUES (?,?)",
                    (combo_id, take_id)
                )

            return combo_id

        def when_ready(dbpool):
            return dbpool.runInteraction(add_combo)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_success(data):
            log.info("Successfully added combination to database; combination id: "+str(data))
            return data
        def on_error(error):
            log.error("Failed to add combination to the database: "+str(error))
            raise Exception("Failed to add combination to the database")
        d.addCallback(on_success)
        d.addErrback(on_error)

        return d

    def get_track_audio_id(self, track_id):
        """Returns track's audio id or None."""
        def execute_sql(cursor):
            cursor.execute("SELECT audioId FROM BackingTracks WHERE id = ?",
                           (track_id,))
            results = cursor.fetchone()
            if results is None:
                return None
            else:
                return results[0]

        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_error(error):
            # BUGFIX: was a plain string, so "{track_id}" was logged
            # literally rather than interpolated.
            log.warn(f"Failed to get audio ID for track id ({track_id}): "+
                     str(error)
            )
            return error
        d.addErrback(on_error)

        return d

    def get_take_audio_id(self, take_id):
        """Returns take's audio id or None."""
        def execute_sql(cursor):
            cursor.execute("SELECT audioId FROM Takes WHERE id = ?",
                           (take_id,))
            results = cursor.fetchone()
            if results is None:
                return None
            else:
                return results[0]

        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_error(error):
            # BUGFIX: was a plain string, so "{take_id}" was logged
            # literally rather than interpolated.
            log.warn(f"Failed to get audio ID for take id ({take_id}): "+
                     str(error)
            )
            return error
        d.addErrback(on_error)

        return d

    def assign_participant(self, client_id, name):
        """Assigns the name to the client id.

        Inserts the participant if unknown, renames them if the stored
        name differs, and is a no-op otherwise.  Returns a Deferred
        firing with client_id.
        """
        def execute_sql(cursor):
            # First, check if the id already exists
            cursor.execute(
                "SELECT participantName FROM Participants WHERE id = ?",
                (client_id,)
            )
            row = cursor.fetchone()

            if row is None:
                # We don't currently have this ID, insert it
                cursor.execute(
                    "INSERT INTO Participants (id, participantName) "+
                    "VALUES (?, ?)",
                    (client_id, name)
                )
                return client_id

            # Otherwise, a row does already exist
            current_name = row[0]
            if name == current_name:
                # We have nothing to do, the database is already
                # correct
                return client_id

            # Otherwise, we need to update the database
            cursor.execute(
                "UPDATE Participants SET participantName = ? WHERE id = ?",
                (name, client_id)
            )
            return client_id

        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_error(error):
            # BUGFIX: was a plain string, so the placeholders were
            # logged literally rather than interpolated.
            log.warn(
                f"Failed to add participant given name '{name}' and id '{client_id}': "+
                str(error)
            )
            return error
        d.addErrback(on_error)

        return d

    def get_participants(self):
        """Return a Deferred firing with a list of
        {"id": ..., "name": ...} dicts, one per participant."""
        def execute_sql(cursor):
            cursor.execute("SELECT id, participantName FROM Participants")
            rows = cursor.fetchall()
            results = [{"id":id_, "name":name} for id_, name in rows]
            return results

        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_error(error):
            log.warn(
                "Failed to get participant list: "+
                str(error)
            )
            return error
        d.addErrback(on_error)

        return d

    def get_audio_ids_from_combination_id(self, combination_id):
        """Return a Deferred firing with the audio ids of the
        combination's backing track (if any) followed by its takes."""
        def execute_sql(cursor):
            # Get Track ID.  There should be either zero or one, but
            # not more.
            cursor.execute(
                "SELECT BackingTracks.audioId\n"+
                "FROM Combinations\n"+
                "LEFT JOIN BackingTracks\n"+
                "ON Combinations.backingTrackId = BackingTracks.id\n"+
                "WHERE combinations.id = ?",
                (combination_id,)
            )
            rows = cursor.fetchall()
            if len(rows) == 0:
                # We don't have a backing track; that's fine, move on
                # to the takes.
                backing_audio_ids = []
            elif len(rows) == 1:
                audio_id = rows[0][0]
                # BUGFIX: a NULL backingTrackId still produces one row
                # (containing None) because of the LEFT JOIN; treat
                # that as "no backing track" instead of returning None
                # as an audio id.
                backing_audio_ids = [] if audio_id is None else [audio_id]
            else:
                # We have more than one backing track; error.
                raise Exception(
                    f"More than one backing track matched "+
                    f"combination id {combination_id}; this "+
                    f"shouldn't be possible"
                )

            # Get the Take IDs.  There may be many of these.  But if
            # there wasn't a backing track id, then there needs to be
            # at least one Take ID.
            cursor.execute(
                "SELECT audioId\n"+
                "FROM CombinationsDetail\n"+
                "LEFT JOIN Takes\n"+
                # BUGFIX: the join previously compared
                # CombinationsDetail.id with Takes.combinationId, which
                # matches neither column's meaning; a detail row's
                # takeId references Takes.id (see add_take /
                # add_combination).
                "ON CombinationsDetail.takeId = Takes.id\n"+
                "WHERE CombinationsDetail.combinationId = ?",
                (combination_id,)
            )
            rows = cursor.fetchall()
            if len(rows) == 0:
                # This is only an issue if we don't have any backing
                # tracks either
                if len(backing_audio_ids) == 0:
                    raise Exception(
                        f"We have neither a backing track nor takes "+
                        f"for the given combination id ({combination_id});"+
                        f"this shouldn't be possible"
                    )
            else:
                # Add the Take IDs to the list
                takes_audio_ids = [row[0] for row in rows]
                backing_audio_ids += takes_audio_ids

            return backing_audio_ids

        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_error(error):
            log.warn(
                "Failed to get backing audio ids from combination id: "+
                str(error)
            )
            return error
        d.addErrback(on_error)

        return d

    def add_take(self, take_name, combination_id):
        """Create a new (incomplete) take for the given combination.

        Returns a Deferred firing with the new take's id.
        """
        def execute_sql(cursor):
            # Create audio id
            cursor.execute("INSERT INTO AudioIdentifiers DEFAULT VALUES")
            audio_id = cursor.lastrowid

            # Create take
            cursor.execute(
                "INSERT INTO Takes (audioId, combinationId, takeName, complete) "+
                "VALUES (?, ?, ?, 0)",
                (audio_id, combination_id, take_name)
            )
            take_id = cursor.lastrowid
            return take_id

        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_error(error):
            log.warn(
                "Failed to add take: "+
                str(error)
            )
            return error
        d.addErrback(on_error)

        return d

    def add_recording_audio_ids(self, take_id, participants):
        """Create one (incomplete) Recordings row per participant for a take.

        Returns a Deferred firing with a dict mapping participant id to
        the freshly allocated audio id.
        """
        def execute_sql(cursor):
            audio_ids = {}
            for participant_id in participants:
                # Create audio id
                cursor.execute("INSERT INTO AudioIdentifiers DEFAULT VALUES")
                audio_id = cursor.lastrowid

                # Add entry into Recordings
                cursor.execute(
                    "INSERT INTO Recordings "+
                    "(audioId, participantId, takeId, complete) "+
                    "VALUES (?, ?, ?, 0)",
                    (audio_id, participant_id, take_id)
                )
                audio_ids[participant_id] = audio_id
            return audio_ids

        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)

        def on_error(error):
            log.warn(
                "Failed to add recordings for participants: "+
                str(error)
            )
            return error
        d.addErrback(on_error)

        return d
|
normal
|
{
"blob_id": "45c1510d19af0979326a1b9975ec363b0b80a291",
"index": 8123,
"step-1": "<mask token>\n\n\nclass Database:\n\n def __init__(self, context, db_filename='database.sqlite'):\n session_files = context['session_files']\n db_filename = session_files.session_dir / db_filename\n database_exists = db_filename.is_file()\n\n def setup_connection(connection):\n cursor = connection.cursor()\n cursor.execute('PRAGMA foreign_keys = ON;')\n dbpool = adbapi.ConnectionPool('sqlite3', db_filename, cp_openfun=\n setup_connection, check_same_thread=False)\n if not database_exists:\n print('Database requires initialisation')\n self._db_ready = dbpool.runInteraction(self._initialise_database)\n\n def on_success(data):\n log.info('Database successfully initialised')\n return dbpool\n\n def on_error(data):\n log.error(f\"Failed to initialise the server's database: {data}\"\n )\n reactor = context['reactor']\n reactor.stop()\n self._db_ready.addCallback(on_success)\n self._db_ready.addErrback(on_error)\n else:\n self._db_ready = defer.Deferred()\n self._db_ready.callback(dbpool)\n expected_version = 4\n\n def check_version(cursor):\n cursor.execute('SELECT version FROM Version')\n row = cursor.fetchone()\n if row is None:\n raise Exception('No version found in Version table of database'\n )\n if row[0] == expected_version:\n log.info(f'Server database version {expected_version}')\n return dbpool\n else:\n reactor = context['reactor']\n reactor.stop()\n raise Exception(\n f'Database version ({row[0]}) did not match expected version ({expected_version}). 
Terminating.'\n )\n\n def run_check_version(dbpool):\n return dbpool.runInteraction(check_version)\n d = self.get_dbpool()\n d.addCallback(run_check_version)\n\n def on_error(error):\n log.error('Failed to verify the database: ' + str(error))\n reactor = context['reactor']\n reactor.stop()\n d.addErrback(on_error)\n\n def _initialise_database(self, cursor):\n log.info('Initialising database')\n initialisation_commands_filename = pkg_resources.resource_filename(\n 'singtserver', 'database.sql')\n f = open(initialisation_commands_filename, 'r')\n initialisation_commands = f.read()\n return cursor.executescript(initialisation_commands)\n <mask token>\n <mask token>\n <mask token>\n\n def get_track_audio_id(self, track_id):\n \"\"\"Returns track's audio id or None.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute('SELECT audioId FROM BackingTracks WHERE id = ?',\n (track_id,))\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get audio ID for track id ({track_id}): ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n <mask token>\n\n def assign_participant(self, client_id, name):\n \"\"\"Assigns the name to the client id.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute(\n 'SELECT participantName FROM Participants WHERE id = ?', (\n client_id,))\n row = cursor.fetchone()\n if row is None:\n cursor.execute(\n 'INSERT INTO Participants (id, participantName) ' +\n 'VALUES (?, ?)', (client_id, name))\n return client_id\n current_name = row[0]\n if name == current_name:\n return client_id\n cursor.execute(\n 'UPDATE Participants SET participantName = ? 
WHERE id = ?',\n (name, client_id))\n return client_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to add participant given name '{name}' and id '{client_id}': \"\n + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_participants(self):\n\n def execute_sql(cursor):\n cursor.execute('SELECT id, participantName FROM Participants')\n rows = cursor.fetchall()\n results = [{'id': id_, 'name': name} for id_, name in rows]\n return results\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get participant list: ' + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_audio_ids_from_combination_id(self, combination_id):\n\n def execute_sql(cursor):\n cursor.execute('SELECT BackingTracks.audioId\\n' +\n 'FROM Combinations\\n' + \"\"\"LEFT JOIN BackingTracks\n\"\"\" +\n 'ON Combinations.backingTrackId = BackingTracks.id\\n' +\n 'WHERE combinations.id = ?', (combination_id,))\n rows = cursor.fetchall()\n if len(rows) == 0:\n backing_audio_ids = []\n elif len(rows) == 1:\n row = rows[0]\n audio_id = row[0]\n backing_audio_ids = [audio_id]\n else:\n raise Exception(f'More than one backing track matched ' +\n f'combination id {combination_id}; this ' +\n f\"shouldn't be possible\")\n cursor.execute('SELECT audioId\\n' + 'FROM CombinationsDetail\\n' +\n 'LEFT JOIN Takes\\n' +\n \"\"\"ON CombinationsDetail.id = Takes.combinationId\n\"\"\" +\n 'WHERE CombinationsDetail.combinationId = ?', (combination_id,)\n )\n rows = cursor.fetchall()\n if len(rows) == 0:\n if len(backing_audio_ids) == 0:\n raise Exception(\n f'We have neither a backing track nor takes ' +\n f'for the given combination id ({combination_id});' +\n f\"this shouldn't be possible\")\n else:\n takes_audio_ids = [row[0] for row 
in rows]\n backing_audio_ids += takes_audio_ids\n return backing_audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n 'Failed to get backing audio ids from combination id: ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n <mask token>\n\n def add_recording_audio_ids(self, take_id, participants):\n\n def execute_sql(cursor):\n audio_ids = {}\n for participant_id in participants:\n cursor.execute('INSERT INTO AudioIdentifiers DEFAULT VALUES')\n audio_id = cursor.lastrowid\n cursor.execute('INSERT INTO Recordings ' +\n '(audioId, participantId, takeId, complete) ' +\n 'VALUES (?, ?, ?, 0)', (audio_id, participant_id, take_id))\n audio_ids[participant_id] = audio_id\n return audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to add recordings for participants: ' + str(error)\n )\n return error\n d.addErrback(on_error)\n return d\n",
"step-2": "<mask token>\n\n\nclass Database:\n\n def __init__(self, context, db_filename='database.sqlite'):\n session_files = context['session_files']\n db_filename = session_files.session_dir / db_filename\n database_exists = db_filename.is_file()\n\n def setup_connection(connection):\n cursor = connection.cursor()\n cursor.execute('PRAGMA foreign_keys = ON;')\n dbpool = adbapi.ConnectionPool('sqlite3', db_filename, cp_openfun=\n setup_connection, check_same_thread=False)\n if not database_exists:\n print('Database requires initialisation')\n self._db_ready = dbpool.runInteraction(self._initialise_database)\n\n def on_success(data):\n log.info('Database successfully initialised')\n return dbpool\n\n def on_error(data):\n log.error(f\"Failed to initialise the server's database: {data}\"\n )\n reactor = context['reactor']\n reactor.stop()\n self._db_ready.addCallback(on_success)\n self._db_ready.addErrback(on_error)\n else:\n self._db_ready = defer.Deferred()\n self._db_ready.callback(dbpool)\n expected_version = 4\n\n def check_version(cursor):\n cursor.execute('SELECT version FROM Version')\n row = cursor.fetchone()\n if row is None:\n raise Exception('No version found in Version table of database'\n )\n if row[0] == expected_version:\n log.info(f'Server database version {expected_version}')\n return dbpool\n else:\n reactor = context['reactor']\n reactor.stop()\n raise Exception(\n f'Database version ({row[0]}) did not match expected version ({expected_version}). 
Terminating.'\n )\n\n def run_check_version(dbpool):\n return dbpool.runInteraction(check_version)\n d = self.get_dbpool()\n d.addCallback(run_check_version)\n\n def on_error(error):\n log.error('Failed to verify the database: ' + str(error))\n reactor = context['reactor']\n reactor.stop()\n d.addErrback(on_error)\n\n def _initialise_database(self, cursor):\n log.info('Initialising database')\n initialisation_commands_filename = pkg_resources.resource_filename(\n 'singtserver', 'database.sql')\n f = open(initialisation_commands_filename, 'r')\n initialisation_commands = f.read()\n return cursor.executescript(initialisation_commands)\n\n def get_dbpool(self):\n d = defer.Deferred()\n\n def db_ready(db):\n d.callback(db)\n return db\n self._db_ready.addCallback(db_ready)\n return d\n <mask token>\n <mask token>\n\n def get_track_audio_id(self, track_id):\n \"\"\"Returns track's audio id or None.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute('SELECT audioId FROM BackingTracks WHERE id = ?',\n (track_id,))\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get audio ID for track id ({track_id}): ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n <mask token>\n\n def assign_participant(self, client_id, name):\n \"\"\"Assigns the name to the client id.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute(\n 'SELECT participantName FROM Participants WHERE id = ?', (\n client_id,))\n row = cursor.fetchone()\n if row is None:\n cursor.execute(\n 'INSERT INTO Participants (id, participantName) ' +\n 'VALUES (?, ?)', (client_id, name))\n return client_id\n current_name = row[0]\n if name == current_name:\n return client_id\n cursor.execute(\n 'UPDATE Participants SET participantName = ? 
WHERE id = ?',\n (name, client_id))\n return client_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to add participant given name '{name}' and id '{client_id}': \"\n + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_participants(self):\n\n def execute_sql(cursor):\n cursor.execute('SELECT id, participantName FROM Participants')\n rows = cursor.fetchall()\n results = [{'id': id_, 'name': name} for id_, name in rows]\n return results\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get participant list: ' + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_audio_ids_from_combination_id(self, combination_id):\n\n def execute_sql(cursor):\n cursor.execute('SELECT BackingTracks.audioId\\n' +\n 'FROM Combinations\\n' + \"\"\"LEFT JOIN BackingTracks\n\"\"\" +\n 'ON Combinations.backingTrackId = BackingTracks.id\\n' +\n 'WHERE combinations.id = ?', (combination_id,))\n rows = cursor.fetchall()\n if len(rows) == 0:\n backing_audio_ids = []\n elif len(rows) == 1:\n row = rows[0]\n audio_id = row[0]\n backing_audio_ids = [audio_id]\n else:\n raise Exception(f'More than one backing track matched ' +\n f'combination id {combination_id}; this ' +\n f\"shouldn't be possible\")\n cursor.execute('SELECT audioId\\n' + 'FROM CombinationsDetail\\n' +\n 'LEFT JOIN Takes\\n' +\n \"\"\"ON CombinationsDetail.id = Takes.combinationId\n\"\"\" +\n 'WHERE CombinationsDetail.combinationId = ?', (combination_id,)\n )\n rows = cursor.fetchall()\n if len(rows) == 0:\n if len(backing_audio_ids) == 0:\n raise Exception(\n f'We have neither a backing track nor takes ' +\n f'for the given combination id ({combination_id});' +\n f\"this shouldn't be possible\")\n else:\n takes_audio_ids = [row[0] for row 
in rows]\n backing_audio_ids += takes_audio_ids\n return backing_audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n 'Failed to get backing audio ids from combination id: ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n <mask token>\n\n def add_recording_audio_ids(self, take_id, participants):\n\n def execute_sql(cursor):\n audio_ids = {}\n for participant_id in participants:\n cursor.execute('INSERT INTO AudioIdentifiers DEFAULT VALUES')\n audio_id = cursor.lastrowid\n cursor.execute('INSERT INTO Recordings ' +\n '(audioId, participantId, takeId, complete) ' +\n 'VALUES (?, ?, ?, 0)', (audio_id, participant_id, take_id))\n audio_ids[participant_id] = audio_id\n return audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to add recordings for participants: ' + str(error)\n )\n return error\n d.addErrback(on_error)\n return d\n",
"step-3": "<mask token>\n\n\nclass Database:\n\n def __init__(self, context, db_filename='database.sqlite'):\n session_files = context['session_files']\n db_filename = session_files.session_dir / db_filename\n database_exists = db_filename.is_file()\n\n def setup_connection(connection):\n cursor = connection.cursor()\n cursor.execute('PRAGMA foreign_keys = ON;')\n dbpool = adbapi.ConnectionPool('sqlite3', db_filename, cp_openfun=\n setup_connection, check_same_thread=False)\n if not database_exists:\n print('Database requires initialisation')\n self._db_ready = dbpool.runInteraction(self._initialise_database)\n\n def on_success(data):\n log.info('Database successfully initialised')\n return dbpool\n\n def on_error(data):\n log.error(f\"Failed to initialise the server's database: {data}\"\n )\n reactor = context['reactor']\n reactor.stop()\n self._db_ready.addCallback(on_success)\n self._db_ready.addErrback(on_error)\n else:\n self._db_ready = defer.Deferred()\n self._db_ready.callback(dbpool)\n expected_version = 4\n\n def check_version(cursor):\n cursor.execute('SELECT version FROM Version')\n row = cursor.fetchone()\n if row is None:\n raise Exception('No version found in Version table of database'\n )\n if row[0] == expected_version:\n log.info(f'Server database version {expected_version}')\n return dbpool\n else:\n reactor = context['reactor']\n reactor.stop()\n raise Exception(\n f'Database version ({row[0]}) did not match expected version ({expected_version}). 
Terminating.'\n )\n\n def run_check_version(dbpool):\n return dbpool.runInteraction(check_version)\n d = self.get_dbpool()\n d.addCallback(run_check_version)\n\n def on_error(error):\n log.error('Failed to verify the database: ' + str(error))\n reactor = context['reactor']\n reactor.stop()\n d.addErrback(on_error)\n\n def _initialise_database(self, cursor):\n log.info('Initialising database')\n initialisation_commands_filename = pkg_resources.resource_filename(\n 'singtserver', 'database.sql')\n f = open(initialisation_commands_filename, 'r')\n initialisation_commands = f.read()\n return cursor.executescript(initialisation_commands)\n\n def get_dbpool(self):\n d = defer.Deferred()\n\n def db_ready(db):\n d.callback(db)\n return db\n self._db_ready.addCallback(db_ready)\n return d\n\n def get_combination(self, track_id=None, take_ids=[]):\n if track_id is None and len(take_ids) == 0:\n raise Exception(\n 'Getting a combination from the database requires ' +\n 'at least a Track ID or at least one Take ID')\n\n def get_combo(cursor):\n if track_id is None:\n assert len(take_ids) > 0\n sql = ('SELECT id\\n' + 'FROM Combinations\\n' +\n \"\"\"WHERE backingTrackId IS NULL\n\"\"\" +\n ' AND id IN\\n' + ' (SELECT combinationId\\n' +\n ' FROM CombinationsDetail\\n' +\n \"\"\" GROUP BY combinationId\n\"\"\" +\n ' HAVING SUM(CASE WHEN takeId IN ({seq}) THEN 1 ELSE 0) = ?'\n .format(seq=','.join(['?'] * len(take_ids))))\n cursor.execute(sql, (*take_ids, len(take_ids)))\n elif len(take_ids) == 0:\n sql = ('SELECT id\\n' + 'FROM Combinations\\n' +\n 'WHERE backingTrackId = ?\\n' +\n \"\"\" AND NOT EXISTS\n\"\"\" + ' (SELECT * \\n' +\n ' FROM CombinationsDetail\\n' +\n ' WHERE combinationId = Combinations.id)')\n cursor.execute(sql, (track_id,))\n else:\n sql = ('SELECT id\\n' + 'FROM Combinations\\n' +\n 'WHERE backingTrackId = ?\\n' + ' AND id IN\\n' +\n \"\"\" (SELECT combinationId\n\"\"\" +\n ' FROM CombinationsDetail\\n' +\n ' GROUP BY combinationId\\n' +\n ' HAVING SUM(CASE 
WHEN takeId IN ({seq}) THEN 1 ELSE 0 END) = ?)'\n ).format(seq=','.join(['?'] * len(take_ids)))\n cursor.execute(sql, (track_id, *take_ids, len(take_ids)))\n row = cursor.fetchone()\n if row is None:\n return None\n combo_id = row[0]\n return combo_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(get_combo)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_success(data):\n log.info(\n 'Successfully added combination to database; combination id: '\n + str(data))\n return data\n d.addCallback(on_success)\n\n def on_error(error):\n log.error('Failed to add combination to the database: ' + str(\n error))\n raise Exception('Failed to add combination to the database')\n d.addErrback(on_error)\n return d\n <mask token>\n\n def get_track_audio_id(self, track_id):\n \"\"\"Returns track's audio id or None.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute('SELECT audioId FROM BackingTracks WHERE id = ?',\n (track_id,))\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get audio ID for track id ({track_id}): ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_take_audio_id(self, take_id):\n \"\"\"Returns take's audio id or None.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute('SELECT audioId FROM Takes WHERE id = ?', (take_id,)\n )\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get audio ID for take id ({take_id}): ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def assign_participant(self, client_id, name):\n \"\"\"Assigns the name to the client id.\"\"\"\n\n def 
execute_sql(cursor):\n cursor.execute(\n 'SELECT participantName FROM Participants WHERE id = ?', (\n client_id,))\n row = cursor.fetchone()\n if row is None:\n cursor.execute(\n 'INSERT INTO Participants (id, participantName) ' +\n 'VALUES (?, ?)', (client_id, name))\n return client_id\n current_name = row[0]\n if name == current_name:\n return client_id\n cursor.execute(\n 'UPDATE Participants SET participantName = ? WHERE id = ?',\n (name, client_id))\n return client_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to add participant given name '{name}' and id '{client_id}': \"\n + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_participants(self):\n\n def execute_sql(cursor):\n cursor.execute('SELECT id, participantName FROM Participants')\n rows = cursor.fetchall()\n results = [{'id': id_, 'name': name} for id_, name in rows]\n return results\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get participant list: ' + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_audio_ids_from_combination_id(self, combination_id):\n\n def execute_sql(cursor):\n cursor.execute('SELECT BackingTracks.audioId\\n' +\n 'FROM Combinations\\n' + \"\"\"LEFT JOIN BackingTracks\n\"\"\" +\n 'ON Combinations.backingTrackId = BackingTracks.id\\n' +\n 'WHERE combinations.id = ?', (combination_id,))\n rows = cursor.fetchall()\n if len(rows) == 0:\n backing_audio_ids = []\n elif len(rows) == 1:\n row = rows[0]\n audio_id = row[0]\n backing_audio_ids = [audio_id]\n else:\n raise Exception(f'More than one backing track matched ' +\n f'combination id {combination_id}; this ' +\n f\"shouldn't be possible\")\n cursor.execute('SELECT audioId\\n' + 'FROM CombinationsDetail\\n' +\n 'LEFT JOIN Takes\\n' 
+\n \"\"\"ON CombinationsDetail.id = Takes.combinationId\n\"\"\" +\n 'WHERE CombinationsDetail.combinationId = ?', (combination_id,)\n )\n rows = cursor.fetchall()\n if len(rows) == 0:\n if len(backing_audio_ids) == 0:\n raise Exception(\n f'We have neither a backing track nor takes ' +\n f'for the given combination id ({combination_id});' +\n f\"this shouldn't be possible\")\n else:\n takes_audio_ids = [row[0] for row in rows]\n backing_audio_ids += takes_audio_ids\n return backing_audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n 'Failed to get backing audio ids from combination id: ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def add_take(self, take_name, combination_id):\n\n def execute_sql(cursor):\n cursor.execute('INSERT INTO AudioIdentifiers DEFAULT VALUES')\n audio_id = cursor.lastrowid\n cursor.execute(\n 'INSERT INTO Takes (audioId, combinationId, takeName, complete) '\n + 'VALUES (?, ?, ?, 0)', (audio_id, combination_id, take_name)\n )\n take_id = cursor.lastrowid\n return take_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to add take: ' + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def add_recording_audio_ids(self, take_id, participants):\n\n def execute_sql(cursor):\n audio_ids = {}\n for participant_id in participants:\n cursor.execute('INSERT INTO AudioIdentifiers DEFAULT VALUES')\n audio_id = cursor.lastrowid\n cursor.execute('INSERT INTO Recordings ' +\n '(audioId, participantId, takeId, complete) ' +\n 'VALUES (?, ?, ?, 0)', (audio_id, participant_id, take_id))\n audio_ids[participant_id] = audio_id\n return audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def 
on_error(error):\n log.warn('Failed to add recordings for participants: ' + str(error)\n )\n return error\n d.addErrback(on_error)\n return d\n",
"step-4": "import pkg_resources\nfrom twisted.enterprise import adbapi\nfrom twisted.internet import defer\nfrom twisted.logger import Logger\nlog = Logger('database')\n\n\nclass Database:\n\n def __init__(self, context, db_filename='database.sqlite'):\n session_files = context['session_files']\n db_filename = session_files.session_dir / db_filename\n database_exists = db_filename.is_file()\n\n def setup_connection(connection):\n cursor = connection.cursor()\n cursor.execute('PRAGMA foreign_keys = ON;')\n dbpool = adbapi.ConnectionPool('sqlite3', db_filename, cp_openfun=\n setup_connection, check_same_thread=False)\n if not database_exists:\n print('Database requires initialisation')\n self._db_ready = dbpool.runInteraction(self._initialise_database)\n\n def on_success(data):\n log.info('Database successfully initialised')\n return dbpool\n\n def on_error(data):\n log.error(f\"Failed to initialise the server's database: {data}\"\n )\n reactor = context['reactor']\n reactor.stop()\n self._db_ready.addCallback(on_success)\n self._db_ready.addErrback(on_error)\n else:\n self._db_ready = defer.Deferred()\n self._db_ready.callback(dbpool)\n expected_version = 4\n\n def check_version(cursor):\n cursor.execute('SELECT version FROM Version')\n row = cursor.fetchone()\n if row is None:\n raise Exception('No version found in Version table of database'\n )\n if row[0] == expected_version:\n log.info(f'Server database version {expected_version}')\n return dbpool\n else:\n reactor = context['reactor']\n reactor.stop()\n raise Exception(\n f'Database version ({row[0]}) did not match expected version ({expected_version}). 
Terminating.'\n )\n\n def run_check_version(dbpool):\n return dbpool.runInteraction(check_version)\n d = self.get_dbpool()\n d.addCallback(run_check_version)\n\n def on_error(error):\n log.error('Failed to verify the database: ' + str(error))\n reactor = context['reactor']\n reactor.stop()\n d.addErrback(on_error)\n\n def _initialise_database(self, cursor):\n log.info('Initialising database')\n initialisation_commands_filename = pkg_resources.resource_filename(\n 'singtserver', 'database.sql')\n f = open(initialisation_commands_filename, 'r')\n initialisation_commands = f.read()\n return cursor.executescript(initialisation_commands)\n\n def get_dbpool(self):\n d = defer.Deferred()\n\n def db_ready(db):\n d.callback(db)\n return db\n self._db_ready.addCallback(db_ready)\n return d\n\n def get_combination(self, track_id=None, take_ids=[]):\n if track_id is None and len(take_ids) == 0:\n raise Exception(\n 'Getting a combination from the database requires ' +\n 'at least a Track ID or at least one Take ID')\n\n def get_combo(cursor):\n if track_id is None:\n assert len(take_ids) > 0\n sql = ('SELECT id\\n' + 'FROM Combinations\\n' +\n \"\"\"WHERE backingTrackId IS NULL\n\"\"\" +\n ' AND id IN\\n' + ' (SELECT combinationId\\n' +\n ' FROM CombinationsDetail\\n' +\n \"\"\" GROUP BY combinationId\n\"\"\" +\n ' HAVING SUM(CASE WHEN takeId IN ({seq}) THEN 1 ELSE 0) = ?'\n .format(seq=','.join(['?'] * len(take_ids))))\n cursor.execute(sql, (*take_ids, len(take_ids)))\n elif len(take_ids) == 0:\n sql = ('SELECT id\\n' + 'FROM Combinations\\n' +\n 'WHERE backingTrackId = ?\\n' +\n \"\"\" AND NOT EXISTS\n\"\"\" + ' (SELECT * \\n' +\n ' FROM CombinationsDetail\\n' +\n ' WHERE combinationId = Combinations.id)')\n cursor.execute(sql, (track_id,))\n else:\n sql = ('SELECT id\\n' + 'FROM Combinations\\n' +\n 'WHERE backingTrackId = ?\\n' + ' AND id IN\\n' +\n \"\"\" (SELECT combinationId\n\"\"\" +\n ' FROM CombinationsDetail\\n' +\n ' GROUP BY combinationId\\n' +\n ' HAVING SUM(CASE 
WHEN takeId IN ({seq}) THEN 1 ELSE 0 END) = ?)'\n ).format(seq=','.join(['?'] * len(take_ids)))\n cursor.execute(sql, (track_id, *take_ids, len(take_ids)))\n row = cursor.fetchone()\n if row is None:\n return None\n combo_id = row[0]\n return combo_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(get_combo)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_success(data):\n log.info(\n 'Successfully added combination to database; combination id: '\n + str(data))\n return data\n d.addCallback(on_success)\n\n def on_error(error):\n log.error('Failed to add combination to the database: ' + str(\n error))\n raise Exception('Failed to add combination to the database')\n d.addErrback(on_error)\n return d\n\n def add_combination(self, track_id=None, take_ids=[]):\n \"\"\"Adds combination into database.\n\n Returns combo_id.\n \"\"\"\n log.info(\n f'Adding combination to database with track id = {track_id} and take_ids = {take_ids}'\n )\n if track_id is None and len(take_ids) == 0:\n raise Exception(\n 'Adding a combination to the database requires ' +\n 'at least a Track ID or at least one Take ID')\n\n def add_combo(cursor):\n cursor.execute('INSERT INTO AudioIdentifiers DEFAULT VALUES')\n audio_id = cursor.lastrowid\n print('track_id:', track_id)\n cursor.execute(\n 'INSERT INTO Combinations (audioId, backingTrackId) VALUES (?, ?)'\n , (audio_id, track_id))\n combo_id = cursor.lastrowid\n for take_id in take_ids:\n cursor.execute(\n 'INSERT INTO CombinationsDetail (combinationId, takeId) ' +\n 'VALUES (?,?)', (combo_id, take_id))\n return combo_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(add_combo)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_success(data):\n log.info(\n 'Successfully added combination to database; combination id: '\n + str(data))\n return data\n\n def on_error(error):\n log.error('Failed to add combination to the database: ' + str(\n error))\n raise Exception('Failed to add combination to the 
database')\n d.addCallback(on_success)\n d.addErrback(on_error)\n return d\n\n def get_track_audio_id(self, track_id):\n \"\"\"Returns track's audio id or None.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute('SELECT audioId FROM BackingTracks WHERE id = ?',\n (track_id,))\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get audio ID for track id ({track_id}): ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_take_audio_id(self, take_id):\n \"\"\"Returns take's audio id or None.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute('SELECT audioId FROM Takes WHERE id = ?', (take_id,)\n )\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get audio ID for take id ({take_id}): ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def assign_participant(self, client_id, name):\n \"\"\"Assigns the name to the client id.\"\"\"\n\n def execute_sql(cursor):\n cursor.execute(\n 'SELECT participantName FROM Participants WHERE id = ?', (\n client_id,))\n row = cursor.fetchone()\n if row is None:\n cursor.execute(\n 'INSERT INTO Participants (id, participantName) ' +\n 'VALUES (?, ?)', (client_id, name))\n return client_id\n current_name = row[0]\n if name == current_name:\n return client_id\n cursor.execute(\n 'UPDATE Participants SET participantName = ? 
WHERE id = ?',\n (name, client_id))\n return client_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to add participant given name '{name}' and id '{client_id}': \"\n + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_participants(self):\n\n def execute_sql(cursor):\n cursor.execute('SELECT id, participantName FROM Participants')\n rows = cursor.fetchall()\n results = [{'id': id_, 'name': name} for id_, name in rows]\n return results\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to get participant list: ' + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def get_audio_ids_from_combination_id(self, combination_id):\n\n def execute_sql(cursor):\n cursor.execute('SELECT BackingTracks.audioId\\n' +\n 'FROM Combinations\\n' + \"\"\"LEFT JOIN BackingTracks\n\"\"\" +\n 'ON Combinations.backingTrackId = BackingTracks.id\\n' +\n 'WHERE combinations.id = ?', (combination_id,))\n rows = cursor.fetchall()\n if len(rows) == 0:\n backing_audio_ids = []\n elif len(rows) == 1:\n row = rows[0]\n audio_id = row[0]\n backing_audio_ids = [audio_id]\n else:\n raise Exception(f'More than one backing track matched ' +\n f'combination id {combination_id}; this ' +\n f\"shouldn't be possible\")\n cursor.execute('SELECT audioId\\n' + 'FROM CombinationsDetail\\n' +\n 'LEFT JOIN Takes\\n' +\n \"\"\"ON CombinationsDetail.id = Takes.combinationId\n\"\"\" +\n 'WHERE CombinationsDetail.combinationId = ?', (combination_id,)\n )\n rows = cursor.fetchall()\n if len(rows) == 0:\n if len(backing_audio_ids) == 0:\n raise Exception(\n f'We have neither a backing track nor takes ' +\n f'for the given combination id ({combination_id});' +\n f\"this shouldn't be possible\")\n else:\n takes_audio_ids = [row[0] for row 
in rows]\n backing_audio_ids += takes_audio_ids\n return backing_audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n 'Failed to get backing audio ids from combination id: ' +\n str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def add_take(self, take_name, combination_id):\n\n def execute_sql(cursor):\n cursor.execute('INSERT INTO AudioIdentifiers DEFAULT VALUES')\n audio_id = cursor.lastrowid\n cursor.execute(\n 'INSERT INTO Takes (audioId, combinationId, takeName, complete) '\n + 'VALUES (?, ?, ?, 0)', (audio_id, combination_id, take_name)\n )\n take_id = cursor.lastrowid\n return take_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to add take: ' + str(error))\n return error\n d.addErrback(on_error)\n return d\n\n def add_recording_audio_ids(self, take_id, participants):\n\n def execute_sql(cursor):\n audio_ids = {}\n for participant_id in participants:\n cursor.execute('INSERT INTO AudioIdentifiers DEFAULT VALUES')\n audio_id = cursor.lastrowid\n cursor.execute('INSERT INTO Recordings ' +\n '(audioId, participantId, takeId, complete) ' +\n 'VALUES (?, ?, ?, 0)', (audio_id, participant_id, take_id))\n audio_ids[participant_id] = audio_id\n return audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn('Failed to add recordings for participants: ' + str(error)\n )\n return error\n d.addErrback(on_error)\n return d\n",
"step-5": "import pkg_resources\nfrom twisted.enterprise import adbapi\nfrom twisted.internet import defer\n\n# Start a logger with a namespace for a particular subsystem of our application.\nfrom twisted.logger import Logger\nlog = Logger(\"database\")\n\nclass Database:\n def __init__(self, context, db_filename=\"database.sqlite\"):\n # Get full path and filename for database\n session_files = context[\"session_files\"]\n db_filename = session_files.session_dir / db_filename\n \n # Note if database already exists\n database_exists = db_filename.is_file()\n\n # Callback for every connection that is established to the\n # database\n def setup_connection(connection):\n # Turn on foreign key constraints\n cursor = connection.cursor()\n cursor.execute(\"PRAGMA foreign_keys = ON;\")\n\n # # Turn on column names in rows\n # import sqlite3\n # connection.row_factory = sqlite3.Row\n \n # Open a connection to the database. SQLite will create the file if\n # it doesn't already exist.\n dbpool = adbapi.ConnectionPool(\n \"sqlite3\",\n db_filename,\n cp_openfun=setup_connection,\n check_same_thread=False # See https://twistedmatrix.com/trac/ticket/3629\n )\n\n # If the database did not exist, initialise the database\n if not database_exists:\n print(\"Database requires initialisation\")\n self._db_ready = dbpool.runInteraction(self._initialise_database)\n def on_success(data):\n log.info(\"Database successfully initialised\")\n return dbpool\n def on_error(data):\n log.error(f\"Failed to initialise the server's database: {data}\")\n reactor = context[\"reactor\"]\n reactor.stop()\n\n self._db_ready.addCallback(on_success)\n self._db_ready.addErrback(on_error)\n else:\n # Database exists already\n self._db_ready = defer.Deferred()\n self._db_ready.callback(dbpool)\n\n # Check that database is the correct version\n expected_version = 4\n def check_version(cursor):\n cursor.execute(\"SELECT version FROM Version\")\n row = cursor.fetchone()\n if row is None:\n raise 
Exception(\"No version found in Version table of database\")\n if row[0] == expected_version:\n log.info(f\"Server database version {expected_version}\")\n return dbpool\n else:\n reactor = context[\"reactor\"]\n reactor.stop()\n raise Exception(f\"Database version ({row[0]}) did not match expected version ({expected_version}). Terminating.\")\n\n def run_check_version(dbpool):\n return dbpool.runInteraction(check_version)\n d = self.get_dbpool()\n d.addCallback(run_check_version)\n\n def on_error(error):\n log.error(\"Failed to verify the database: \"+str(error))\n reactor = context[\"reactor\"]\n reactor.stop()\n d.addErrback(on_error)\n\n \n # Initialise the database structure from instructions in file\n def _initialise_database(self, cursor):\n log.info(\"Initialising database\")\n initialisation_commands_filename = \\\n pkg_resources.resource_filename(\n \"singtserver\",\n \"database.sql\"\n )\n f = open(initialisation_commands_filename, \"r\")\n initialisation_commands = f.read()\n return cursor.executescript(initialisation_commands)\n\n\n def get_dbpool(self):\n d = defer.Deferred()\n def db_ready(db):\n d.callback(db)\n return db\n self._db_ready.addCallback(db_ready)\n \n return d\n\n \n def get_combination(self, track_id=None, take_ids=[]):\n # Sanity check arguments\n if (track_id is None\n and len(take_ids) == 0):\n raise Exception(\n \"Getting a combination from the database requires \"+\n \"at least a Track ID or at least one Take ID\"\n )\n\n # Get combination from database.\n # See answers to https://stackoverflow.com/questions/63356820/sql-select-from-many-to-one\n # and https://stackoverflow.com/a/5766293/562930\n def get_combo(cursor):\n if track_id is None:\n assert len(take_ids) > 0\n sql = (\n \"SELECT id\\n\"+\n \"FROM Combinations\\n\"+\n \"WHERE backingTrackId IS NULL\\n\"+\n \" AND id IN\\n\"+\n \" (SELECT combinationId\\n\"+\n \" FROM CombinationsDetail\\n\"+\n \" GROUP BY combinationId\\n\" +\n \" HAVING SUM(CASE WHEN takeId IN ({seq}) 
THEN 1 ELSE 0) = ?\".format(\n seq=\",\".join([\"?\"]*len(take_ids))\n )\n )\n cursor.execute(\n sql,\n (*take_ids, len(take_ids))\n )\n \n elif len(take_ids) == 0:\n sql = (\n \"SELECT id\\n\"+\n \"FROM Combinations\\n\"+\n \"WHERE backingTrackId = ?\\n\"+\n \" AND NOT EXISTS\\n\"+\n \" (SELECT * \\n\"+\n \" FROM CombinationsDetail\\n\"+\n \" WHERE combinationId = Combinations.id)\"\n )\n cursor.execute(\n sql,\n (track_id, )\n )\n \n else:\n sql = (\"SELECT id\\n\"+\n \"FROM Combinations\\n\"+\n \"WHERE backingTrackId = ?\\n\"+\n \" AND id IN\\n\"+\n \" (SELECT combinationId\\n\"+\n \" FROM CombinationsDetail\\n\"+\n \" GROUP BY combinationId\\n\" +\n \" HAVING SUM(CASE WHEN takeId IN ({seq}) THEN 1 ELSE 0 END) = ?)\").format(\n seq=\",\".join(['?']*len(take_ids))\n )\n cursor.execute(\n sql,\n (track_id, *take_ids, len(take_ids))\n )\n\n # Although there should be at most only one combo id that\n # matches the track and takes specification, even if there\n # are more than one, we'll just return the first (or None\n # if there aren't any).\n row = cursor.fetchone()\n if row is None:\n return None\n combo_id = row[0]\n return combo_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(get_combo)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_success(data):\n log.info(\"Successfully added combination to database; combination id: \"+str(data))\n return data\n d.addCallback(on_success)\n \n def on_error(error):\n log.error(\"Failed to add combination to the database: \"+str(error))\n raise Exception(\"Failed to add combination to the database\")\n d.addErrback(on_error)\n\n return d\n\n\n def add_combination(self, track_id=None, take_ids=[]):\n \"\"\"Adds combination into database.\n\n Returns combo_id.\n \"\"\"\n log.info(f\"Adding combination to database with track id = {track_id} and take_ids = {take_ids}\")\n # Sanity check arguments\n if (track_id is None\n and len(take_ids) == 0):\n raise Exception(\n \"Adding a combination to the 
database requires \"+\n \"at least a Track ID or at least one Take ID\"\n )\n\n # Create combination in database\n def add_combo(cursor):\n # Create audio id\n cursor.execute(\"INSERT INTO AudioIdentifiers DEFAULT VALUES\")\n audio_id = cursor.lastrowid\n \n print(\"track_id:\", track_id)\n cursor.execute(\n \"INSERT INTO Combinations (audioId, backingTrackId) VALUES (?, ?)\",\n (audio_id, track_id)\n )\n combo_id = cursor.lastrowid\n\n for take_id in take_ids:\n cursor.execute(\n \"INSERT INTO CombinationsDetail (combinationId, takeId) \"+\n \"VALUES (?,?)\",\n (combo_id, take_id)\n )\n \n return combo_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(add_combo)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_success(data):\n log.info(\"Successfully added combination to database; combination id: \"+str(data))\n return data\n def on_error(error):\n log.error(\"Failed to add combination to the database: \"+str(error))\n raise Exception(\"Failed to add combination to the database\")\n\n d.addCallback(on_success)\n d.addErrback(on_error)\n\n return d\n \n\n def get_track_audio_id(self, track_id):\n \"\"\"Returns track's audio id or None.\"\"\"\n def execute_sql(cursor):\n cursor.execute(\"SELECT audioId FROM BackingTracks WHERE id = ?\",\n (track_id,))\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n \n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\"Failed to get audio ID for track id ({track_id}): \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d\n \n\n def get_take_audio_id(self, take_id):\n \"\"\"Returns take's audio id or None.\"\"\"\n def execute_sql(cursor):\n cursor.execute(\"SELECT audioId FROM Takes WHERE id = ?\",\n (take_id,))\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n \n def when_ready(dbpool):\n 
return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\"Failed to get audio ID for take id ({take_id}): \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d\n\n \n def assign_participant(self, client_id, name):\n \"\"\"Assigns the name to the client id.\"\"\"\n\n def execute_sql(cursor):\n # First, check if the id already exists\n cursor.execute(\n \"SELECT participantName FROM Participants WHERE id = ?\",\n (client_id,)\n )\n row = cursor.fetchone()\n if row is None:\n # We don't currently have this ID, insert it\n cursor.execute(\n \"INSERT INTO Participants (id, participantName) \"+\n \"VALUES (?, ?)\",\n (client_id, name)\n )\n return client_id\n\n # Otherwise, a row does already exist\n current_name = row[0]\n if name == current_name:\n # We have nothing to do, the database is already\n # correct\n return client_id\n\n # Otherwise, we need to update the database\n cursor.execute(\n \"UPDATE Participants SET participantName = ? 
WHERE id = ?\",\n (name, client_id)\n )\n return client_id\n \n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to add participant given name '{name}' and id '{client_id}': \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d\n\n \n def get_participants(self):\n def execute_sql(cursor):\n cursor.execute(\"SELECT id, participantName FROM Participants\")\n rows = cursor.fetchall()\n results = [{\"id\":id_, \"name\":name} for id_, name in rows]\n return results\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to get participant list: \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d\n \n def get_audio_ids_from_combination_id(self, combination_id):\n def execute_sql(cursor):\n # Get Track ID. There should be either zero or one, but\n # not more.\n cursor.execute(\n \"SELECT BackingTracks.audioId\\n\"+\n \"FROM Combinations\\n\"+\n \"LEFT JOIN BackingTracks\\n\"+\n \"ON Combinations.backingTrackId = BackingTracks.id\\n\"+\n \"WHERE combinations.id = ?\",\n (combination_id,)\n )\n rows = cursor.fetchall()\n if len(rows) == 0:\n # We don't have a backing track; that's fine, move on\n # to the takes.\n backing_audio_ids = []\n elif len(rows) == 1:\n # We have one backing track\n row = rows[0]\n audio_id = row[0]\n backing_audio_ids = [audio_id]\n else:\n # We have more than one backing track; error.\n raise Exception(\n f\"More than one backing track matched \"+\n f\"combination id {combination_id}; this \"+\n f\"shouldn't be possible\"\n )\n\n # Get the Take IDs. There may be many of these. 
But if\n # there wasn't a backing track id, then there needs to be\n # at least one Take ID.\n cursor.execute(\n \"SELECT audioId\\n\"+\n \"FROM CombinationsDetail\\n\"+\n \"LEFT JOIN Takes\\n\"+\n \"ON CombinationsDetail.id = Takes.combinationId\\n\"+\n \"WHERE CombinationsDetail.combinationId = ?\",\n (combination_id,)\n )\n rows = cursor.fetchall()\n if len(rows) == 0:\n # This is only as issue if we don't have any backing\n # tracks either\n if len(backing_audio_ids) == 0:\n raise Exception(\n f\"We have neither a backing track nor takes \"+\n f\"for the given combination id ({combination_id});\"+\n f\"this shouldn't be possible\"\n )\n else:\n # Add the Take IDs to the list \n takes_audio_ids = [row[0] for row in rows]\n backing_audio_ids += takes_audio_ids\n \n return backing_audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to get backing audio ids from combination id: \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d\n \n def add_take(self, take_name, combination_id):\n def execute_sql(cursor):\n # Create audio id\n cursor.execute(\"INSERT INTO AudioIdentifiers DEFAULT VALUES\")\n audio_id = cursor.lastrowid\n\n # Create take\n cursor.execute(\n \"INSERT INTO Takes (audioId, combinationId, takeName, complete) \"+\n \"VALUES (?, ?, ?, 0)\",\n (audio_id, combination_id, take_name)\n )\n take_id = cursor.lastrowid\n\n return take_id\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to add take: \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d\n \n def add_recording_audio_ids(self, take_id, participants):\n def execute_sql(cursor):\n audio_ids = {}\n for participant_id in participants:\n # Create audio id\n cursor.execute(\"INSERT INTO AudioIdentifiers DEFAULT 
VALUES\")\n audio_id = cursor.lastrowid\n\n # Add entry into Recordings\n cursor.execute(\n \"INSERT INTO Recordings \"+\n \"(audioId, participantId, takeId, complete) \"+\n \"VALUES (?, ?, ?, 0)\",\n (audio_id, participant_id, take_id)\n )\n \n audio_ids[participant_id] = audio_id\n return audio_ids\n\n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\n \"Failed to add recordings for participants: \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d\n\n",
"step-ids": [
8,
9,
12,
15,
16
]
}
|
[
8,
9,
12,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add():
print(a)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add():
print(a)
add()
<|reserved_special_token_1|>
a = 'aa'
def add():
print(a)
add()
<|reserved_special_token_1|>
# data={
# "name":"Alby",
# "age":23
# }
# print (data['age'])
# def foo():
# print("Hellow world")
# return 1
# print (foo())
a="aa"
def add():
print(a)
add()
|
flexible
|
{
"blob_id": "97857c1c5468a96187d44abc23ffaaf2a7ead1a6",
"index": 1869,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef add():\n print(a)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add():\n print(a)\n\n\nadd()\n",
"step-4": "a = 'aa'\n\n\ndef add():\n print(a)\n\n\nadd()\n",
"step-5": "# data={\n# \"name\":\"Alby\",\n# \"age\":23\n# }\n\n\n# print (data['age'])\n\n# def foo():\n# print(\"Hellow world\")\n# return 1\n\n# print (foo())\na=\"aa\"\n\ndef add():\n \n print(a)\n\n\nadd()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_qr(query):
img = qrcode.make(query)
<|reserved_special_token_1|>
import qrcode
def generate_qr(query):
img = qrcode.make(query)
|
flexible
|
{
"blob_id": "e97bcf31657317f33f4a138ede80bb9171337f52",
"index": 4730,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_qr(query):\n img = qrcode.make(query)\n",
"step-3": "import qrcode\n\n\ndef generate_qr(query):\n img = qrcode.make(query)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(101):
result += i
print(result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
result = 0
for i in range(101):
result += i
print(result)
<|reserved_special_token_1|>
"""
챕터: day4
주제: 반복문(for문)
문제: 1에서 100까지 합을 구하여 출력하시오.
작성자: 한현수
작성일: 2018.9.20.
"""
result = 0
for i in range(101):
result += i
print(result)
|
flexible
|
{
"blob_id": "d2754099adebdb4bd2b028fdf9015571ad773754",
"index": 9313,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(101):\n result += i\nprint(result)\n",
"step-3": "<mask token>\nresult = 0\nfor i in range(101):\n result += i\nprint(result)\n",
"step-4": "\"\"\"\n챕터: day4\n주제: 반복문(for문)\n문제: 1에서 100까지 합을 구하여 출력하시오.\n작성자: 한현수\n작성일: 2018.9.20.\n\"\"\"\nresult = 0\nfor i in range(101):\n result += i\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from mininet.cli import CLI
from mininet.term import makeTerms
from mininet.util import irange
from log import log
from utils import (UITextStyle, display)
from dijkstra import (get_routing_decision, get_route_cost)
# Check if route directly connects two switches
def isDirect(route):
    """Return True when *route* joins two switches directly (no hops between)."""
    return len(route) == 2
# Add purple background for indirect routes
def brightLabel(text):
    """Wrap *text* in a purple-background terminal style (reset afterwards)."""
    purple = UITextStyle.BackgroundColor.purple
    reset = UITextStyle.Format.reset
    return '{}{}{}'.format(purple, str(text), reset)
# Execute commands one by one
def __wait__(*commandList):
    """Invoke each CLI command in turn, pausing for <Return> between them.

    Each entry in *commandList* is a ``do_*`` handler taking a single line
    argument; it is called with an empty string.
    """
    steps = len(commandList)
    for index, command in enumerate(commandList, start=1):
        command('')
        display.prompt('\n\nPress <Return> to continue (%s/%s)' %
                       (index, steps))
        # Swallow any interruption while waiting so the sequence continues.
        try:
            input('')
        except:
            pass
# Mininet Command Line Interface extension
# Mininet Command Line Interface extension
class DongPhamTestCli(CLI):
    """Interactive Mininet CLI with extra routing/OpenFlow inspection commands.

    Adds ``do_*`` handlers for displaying IP addresses, Dijkstra link weights,
    least-cost routes, OVS flow tables and per-host diagnostics.

    NOTE(review): the table-printing code previously used Python-2 style
    ``print(x),`` statements, which under Python 3 print a newline after every
    cell (breaking the table layout) and discard the trailing tuple; header
    lines such as ``print('From\\To'), (...)`` never printed the column names
    at all.  All of those are fixed below with ``print(..., end='')``.
    """
    prompt = 'dongpham> '

    def __init__(self, _mininet, _env):
        # Stash environment and network references before CLI.__init__
        # starts the interactive loop.
        self.env = _env
        self.net = _mininet
        self._testCLI = {}
        CLI.__init__(self, _mininet)

    # Tell the controller to do a command
    def do(self, shell, quiet=False):
        """Run *shell* on the controller; suppress command echo when *quiet*."""
        if (quiet):
            return self.mn.controller.cmd(shell)
        return self.mn.controller.cmdPrint(shell)

    def doPrint(self, shell):
        """Run *shell* on the controller with output highlighting enabled."""
        display.cmdHighlight(True)
        self.mn.controller.cmdPrint(shell)
        display.cmdHighlight(False)

    # Run all commands in the wait list
    def do_all(self, _):
        """Run every display command in sequence, pausing between each."""
        __wait__(
            # Show ip
            self.do_ips,
            # Routing commands
            self.do_weights, self.do_costs, self.do_routes, self.do_paths,
            # Flow commands
            self.do_flows, self.do_stats
        )

    # Show object info
    # info [node1, node2, ...]
    def do_info(self, line):
        """List the attributes of the named nodes (all nodes when none given)."""
        locals = self.getLocals()
        _nodes = line.split()
        display.section("All functions")
        if not (_nodes):
            _nodes = self.mn.keys()
        for n in _nodes:
            # Stop at the first unknown name rather than raising.
            if not (locals.__contains__(n)):
                break
            obj = locals[n]
            display.subsection('%s (%s)' % (n, obj.IP()))
            print(dir(obj))

    # Show IP addresses
    # ips
    def do_ips(self, _):
        """Print IP (and for hosts, MAC) addresses of every network node."""
        display.section("IP Addresses")
        locals = self.getLocals()

        def showIP(*keys):
            # Name and IP only (controllers/switches).
            for key in keys:
                display.message('%s\t%s' % (key.name, key.IP()))

        def showAll(*keys):
            # Name, IP and MAC (hosts).
            for key in keys:
                display.message('%s\t%s\t%s' % (key.name, key.IP(), key.MAC()))

        # For each node
        display.subsection('Controllers')
        for c in self.mn.controllers:
            showIP(locals[c.name])
        display.subsection('Switches')
        for s in self.mn.switches:
            showIP(locals[s.name])
        display.subsection('Hosts')
        for h in self.mn.hosts:
            showAll(locals[h.name])

    #MARK: - Routing

    # Show link weights
    # weights
    def do_weights(self, _):
        """Print the weight assigned to each inter-switch link."""
        display.section("Weights")
        log.infoln('Link\t\tWeight')
        log.infoln('--------------------')
        for (i, j, w) in self.mn.topo._slinks:
            log.infoln('{s%s, s%s}\t%s' % (i, j, w))

    # Show costs of reaching every other switch
    # costs
    def do_costs(self, _):
        """Print a matrix of total least-path costs between all switch pairs.

        Indirect routes (with intermediate switches) are highlighted.
        """
        # Algorithm input: switch names and weighted links for Dijkstra.
        switches = self.mn.topo.switches()
        weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
                   for i in self.mn.topo._slinks]
        # Print cost of reaching 'end' switch from 'start' switch
        display.section("Total path costs")
        # BUGFIX: header columns were never printed before (tuple discarded).
        print('From\\To', '\t'.join(switches))
        for start in switches:
            # BUGFIX: end='' keeps each row on one line (was Py2 trailing comma).
            print(start + '\t', end='')
            for end in switches:
                if (start == end):
                    print('--\t', end='')
                    continue
                route = get_routing_decision(start, weights, end)
                cost = get_route_cost([route])
                if (isDirect(route)):
                    # Print result for directly connected switches
                    print(cost, end='')
                else:
                    # Print and highlight routes with intermediate switches
                    print(brightLabel(cost), end='')
                print('\t', end='')
            print('')

    # Show least-cost paths from every switch to every other switch
    # routes
    def do_routes(self, _):
        """Print, for each switch pair, the first hop on the least-cost path."""
        # Algorithm input: switch names and weighted links for Dijkstra.
        switches = self.mn.topo.switches()
        weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
                   for i in self.mn.topo._slinks]
        # Print next hop switch
        display.section("First-Hop with lowest cost")
        # BUGFIX: header columns were never printed before (tuple discarded).
        print('From\\To\t', '\t'.join(switches))
        for start in switches:
            # BUGFIX: end='' keeps each row on one line (was Py2 trailing comma).
            print(start + '\t', end='')
            for end in switches:
                if (start == end):
                    print('--\t', end='')
                    continue
                route = get_routing_decision(start, weights, end)
                if (isDirect(route)):
                    # Print result for directly connected switches
                    print(end, end='')
                else:
                    # Print and highlight routes with intermediate switches
                    print(brightLabel(route[1]), end='')
                print('\t', end='')
            print('')

    # Show the complete shortest path from one switch to every other switch
    # paths
    def do_paths(self, line):
        """Print the full least-cost path and its cost for every switch pair."""
        # Algorithm input: switch names and weighted links for Dijkstra.
        switches = self.mn.topo.switches()
        weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
                   for i in self.mn.topo._slinks]
        # Least cost paths to every node
        display.section("Least-cost paths to other nodes")
        display.message('From -> To\tCost\t\tFull Shortest Path')
        for start in switches:
            display.subsection('%s' % start)
            for end in switches:
                if (start == end):
                    continue
                route = get_routing_decision(start, weights, end)
                cost = get_route_cost([route])
                display.message('%s -> %s\t%s\t\t%s' %
                                (start, end, cost, route))

    #MARK: - OpenFlow

    # Display flows
    # flows
    def do_flows(self, _line):
        """Dump the OpenFlow flow table of every switch."""
        display.section("Showing all flows of all OVSSwitches")
        for s in self.mn.switches:
            self.doPrint('sudo ovs-ofctl dump-flows %s' % s)

    # Delete flows
    # deleteFlows
    def do_deleteFlows(self, _line):
        """Delete every flow entry from every switch."""
        display.section("Deleting all flows of all OVSSwitches")
        for s in self.mn.switches:
            self.doPrint('sudo ovs-ofctl del-flows %s' % s)

    # Display flow statistics
    # stats
    def do_stats(self, _):
        """Show per-switch flow entries that have carried at least one packet."""
        display.section("OpenFlow: Sent/Received Packets")
        display.message(
            'Packets passing through a switch on the way host with IP address = "nw_dst"')
        for s in self.mn.switches:
            display.subsection('%s - Traffic' % s.name)
            self.doPrint(
                'sudo ovs-ofctl dump-flows %s | grep -e "n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]" -To' % (s.name))

    # MARK: - Run on every node

    # arps
    def do_arps(self, _line):
        """Print the ARP cache of every host."""
        display.section("ARP caches of all hosts")
        sh = 'arp -a'
        for h in self.mn.hosts:
            h.cmdPrint(sh)

    # netstats
    def do_netstats(self, _line):
        """Print the kernel routing table of every host and the controller."""
        display.section("Routing Tables")
        sh = 'netstat -rn'
        display.subsection('Hosts')
        for h in self.mn.hosts:
            h.cmdPrint(sh)
        display.subsection('Controller')
        self.doPrint(sh)

    # ifconfigs
    def do_ifconfigs(self, _line):
        """Print the interface configuration of every host and the controller."""
        display.section("Showing Interface Configuration")
        sh = 'ifconfig -a'
        display.subsection('Hosts')
        for h in self.mn.hosts:
            h.cmdPrint(sh)
        display.subsection('Controller')
        self.doPrint(sh)

    #MARK: - Other

    def do_xxx_testFlows1(self, _line):
        """Install hard-coded IP/ARP forwarding flows for the tiny test topology."""
        display.section("Adding test flows to Tiny Network")
        self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')
        self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')
        self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')
        self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')

    def do_xxx_traffic(self, _line):
        """Run tcpdump on each host's default interface (blocks per host)."""
        # display.section("Monitoring sent and received packets of all hosts")
        for h in self.mn.hosts:
            h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)

    def do_xxx_xterms(self, _line):
        """Open xterm windows for the fixed node set h1, h2, s1, s2."""
        locals = self.getLocals()
        terms = makeTerms([locals[name]
                           for name in ['h1', 'h2', 's1', 's2']])
        # Track the terminals so Mininet can clean them up on exit.
        self.mn.terms += terms

    def do_xxx_sharks(self, line):
        """Launch Wireshark from the named nodes (all nodes when none given)."""
        display.section("Launching Wireshark")
        sh = 'sudo wireshark &'
        locals = self.getLocals()
        _nodes = line.split()
        if not (_nodes):
            _nodes = self.mn.keys()
        for n in _nodes:
            # Stop at the first unknown name rather than raising.
            if not (locals.__contains__(n)):
                break
            obj = locals[n]
            obj.cmdPrint(sh)
|
normal
|
{
"blob_id": "7636925982434b12307383ba7b01f931f7ea6e24",
"index": 5927,
"step-1": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n <mask token>\n <mask token>\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n <mask token>\n <mask token>\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, 
_line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n <mask token>\n <mask token>\n <mask token>\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-2": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n <mask token>\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n 
if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n 
for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n <mask token>\n <mask token>\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-3": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n def do(self, shell, quiet=False):\n if quiet:\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == 
end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e 
\"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_xxx_testFlows1(self, _line):\n display.section('Adding test flows to Tiny Network')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, _line):\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-4": "from mininet.cli import CLI\nfrom mininet.term import makeTerms\nfrom mininet.util import irange\nfrom log import log\nfrom utils import UITextStyle, display\nfrom dijkstra import get_routing_decision, get_route_cost\n\n\ndef isDirect(route):\n return len(route) == 2\n\n\ndef brightLabel(text):\n return UITextStyle.BackgroundColor.purple + str(text\n ) + UITextStyle.Format.reset\n\n\ndef __wait__(*commandList):\n steps = len(commandList)\n for i in range(steps):\n commandList[i]('')\n display.prompt('\\n\\nPress <Return> to continue (%s/%s)' % (i + 1,\n steps))\n try:\n x = input('')\n except:\n x = ''\n\n\nclass DongPhamTestCli(CLI):\n prompt = 'dongpham> '\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n def do(self, shell, quiet=False):\n if quiet:\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n 
display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in 
self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_xxx_testFlows1(self, _line):\n display.section('Adding test flows to Tiny Network')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, 
_line):\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-5": "\nfrom mininet.cli import CLI\nfrom mininet.term import makeTerms\nfrom mininet.util import irange\n\nfrom log import log\nfrom utils import (UITextStyle, display)\n\nfrom dijkstra import (get_routing_decision, get_route_cost)\n\n# Check if route directly connects two switches\ndef isDirect(route):\n return (len(route) == 2)\n# Add purple background for indirect routes\n\n\ndef brightLabel(text):\n return (UITextStyle.BackgroundColor.purple + str(text) + UITextStyle.Format.reset)\n# Execute commands one by one\n\n\ndef __wait__(*commandList):\n steps = len(commandList)\n for i in range(steps):\n commandList[i]('')\n display.prompt('\\n\\nPress <Return> to continue (%s/%s)' %\n (i + 1, steps))\n try:\n x = input('')\n except:\n x = ''\n\n\n# Mininet Command Line Interface extension\nclass DongPhamTestCli(CLI):\n prompt = 'dongpham> '\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n # Tell the controller to do a command\n def do(self, shell, quiet=False):\n if (quiet):\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n # Run all commands in the wait list\n def do_all(self, _):\n __wait__(\n # Show ip\n self.do_ips,\n # Routing commands\n self.do_weights, self.do_costs, self.do_routes, self.do_paths,\n # Flow commands\n self.do_flows, self.do_stats\n )\n\n # Show object info\n # info [node1, node2, ...]\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section(\"All functions\")\n if not (_nodes):\n _nodes = self.mn.keys()\n for n in _nodes:\n if not (locals.__contains__(n)):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n # Show IP addresses\n # ips\n def do_ips(self, _):\n display.section(\"IP Addresses\")\n locals = 
self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n # For each node\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n #MARK: - Routing\n # Show link weights\n # weights\n def do_weights(self, _):\n display.section(\"Weights\")\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for (i, j, w) in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n # Show costs of reaching every other switch\n # costs\n def do_costs(self, _):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Print cost of reaching 'end' switch from 'start' switch\n display.section(\"Total path costs\")\n print('From\\\\To'), ('\\t'.join(switches))\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if (start == end):\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if (isDirect(route)):\n # Print result for directly connected switches\n print(cost),\n else:\n # Print and highlight routes with intermediate switches\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n # Show least-cost paths from every switch to every other switch\n # routes\n def do_routes(self, _):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Print next hop switch\n display.section(\"First-Hop with lowest cost\")\n print('From\\\\To\\t'), ('\\t'.join(switches))\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n 
if (start == end):\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if (isDirect(route)):\n # Print result for directly connected switches\n print(end),\n else:\n # Print and highlight routes with intermediate switches\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n # Show the complete shortest path from one switch to every other switch\n # paths\n def do_paths(self, line):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Least cost paths to every node\n display.section(\"Least-cost paths to other nodes\")\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if (start == end):\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' %\n (start, end, cost, route))\n\n #MARK: - OpenFlow\n # Display flows\n # flows\n def do_flows(self, _line):\n display.section(\"Showing all flows of all OVSSwitches\")\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n # Delete flows\n # deleteFlows\n\n def do_deleteFlows(self, _line):\n display.section(\"Deleting all flows of all OVSSwitches\")\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n # Display flow statistics\n # stats\n\n def do_stats(self, _):\n display.section(\"OpenFlow: Sent/Received Packets\")\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"')\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To' % (s.name))\n\n # MARK: - Run on every node\n # arps\n def do_arps(self, _line):\n display.section(\"ARP caches of all hosts\")\n sh = 'arp -a'\n for h in 
self.mn.hosts:\n h.cmdPrint(sh)\n # netstats\n\n def do_netstats(self, _line):\n display.section(\"Routing Tables\")\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n # ifconfigs\n\n def do_ifconfigs(self, _line):\n display.section(\"Showing Interface Configuration\")\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n #MARK: - Other\n def do_xxx_testFlows1(self, _line):\n display.section(\"Adding test flows to Tiny Network\")\n self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n\n self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, _line):\n #\t\tdisplay.section(\"Monitoring sent and received packets of all hosts\")\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name]\n for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section(\"Launching Wireshark\")\n sh = 'sudo wireshark &'\n\n locals = self.getLocals()\n _nodes = line.split()\n if not (_nodes):\n _nodes = self.mn.keys()\n for n in _nodes:\n if not (locals.__contains__(n)):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-ids": [
10,
18,
21,
26,
27
]
}
|
[
10,
18,
21,
26,
27
] |
<|reserved_special_token_0|>
def urls():
os.system('urls.bat')
def urls_1():
os.system('urls_1.bat')
<|reserved_special_token_0|>
def urls_3():
os.system('urls_3.bat')
def urls_4():
os.system('urls_4.bat')
def urls_5():
os.system('urls_5.bat')
def urls_6():
os.system('urls_6.bat')
def urls_7():
os.system('urls_7.bat')
def urls_8():
os.system('urls_8.bat')
<|reserved_special_token_0|>
def urls_11():
os.system('urls_11.bat')
<|reserved_special_token_0|>
def urls_13():
os.system('urls_13.bat')
def urls_14():
os.system('urls_14.bat')
def urls_15():
os.system('urls_15.bat')
<|reserved_special_token_0|>
def urls_17():
os.system('urls_17.bat')
def urls_18():
os.system('urls_18.bat')
def urls_19():
os.system('urls_19.bat')
<|reserved_special_token_0|>
def urls_21():
os.system('urls_22.bat')
<|reserved_special_token_0|>
def urls_23():
os.system('urls_23.bat')
def urls_24():
os.system('urls_24.bat')
<|reserved_special_token_0|>
def urls_29():
os.system('urls_29.bat')
<|reserved_special_token_0|>
def urls_31():
os.system('urls_32.bat')
def urls_32():
os.system('urls_32.bat')
def urls_33():
os.system('urls_33.bat')
<|reserved_special_token_0|>
def urls_35():
os.system('urls_35.bat')
def urls_36():
os.system('urls_36.bat')
def urls_37():
os.system('urls_37.bat')
<|reserved_special_token_0|>
def urls_38():
os.system('urls_39.bat')
<|reserved_special_token_0|>
def urls_40():
os.system('urls_41.bat')
<|reserved_special_token_0|>
def urls_44():
os.system('urls_44.bat')
<|reserved_special_token_0|>
def urls_46():
os.system('urls_46.bat')
<|reserved_special_token_0|>
def urls_47():
os.system('urls_48.bat')
def urls_48():
os.system('urls_49.bat')
<|reserved_special_token_0|>
def urls_52():
os.system('urls_52.bat')
<|reserved_special_token_0|>
def urls_55():
os.system('urls_55.bat')
<|reserved_special_token_0|>
def urls_57():
os.system('urls_58.bat')
def urls_58():
os.system('urls_59.bat')
<|reserved_special_token_0|>
def urls_61():
os.system('urls_61.bat')
def urls_62():
os.system('urls_62.bat')
def urls_63():
os.system('urls_63.bat')
def urls_64():
os.system('urls_64.bat')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def urls():
os.system('urls.bat')
def urls_1():
os.system('urls_1.bat')
<|reserved_special_token_0|>
def urls_3():
os.system('urls_3.bat')
def urls_4():
os.system('urls_4.bat')
def urls_5():
os.system('urls_5.bat')
def urls_6():
os.system('urls_6.bat')
def urls_7():
os.system('urls_7.bat')
def urls_8():
os.system('urls_8.bat')
def urls_9():
os.system('urls_9.bat')
<|reserved_special_token_0|>
def urls_11():
os.system('urls_11.bat')
def urls_12():
os.system('urls_12.bat')
def urls_13():
os.system('urls_13.bat')
def urls_14():
os.system('urls_14.bat')
def urls_15():
os.system('urls_15.bat')
def urls_16():
os.system('urls_16.bat')
def urls_17():
os.system('urls_17.bat')
def urls_18():
os.system('urls_18.bat')
def urls_19():
os.system('urls_19.bat')
def urls_20():
os.system('urls_20.bat')
<|reserved_special_token_0|>
def urls_21():
os.system('urls_22.bat')
<|reserved_special_token_0|>
def urls_23():
os.system('urls_23.bat')
def urls_24():
os.system('urls_24.bat')
def urls_25():
os.system('urls_25.bat')
def urls_26():
os.system('urls_26.bat')
def urls_27():
os.system('urls_27.bat')
<|reserved_special_token_0|>
def urls_29():
os.system('urls_29.bat')
<|reserved_special_token_0|>
def urls_31():
os.system('urls_32.bat')
def urls_32():
os.system('urls_32.bat')
def urls_33():
os.system('urls_33.bat')
def urls_34():
os.system('urls_34.bat')
def urls_35():
os.system('urls_35.bat')
def urls_36():
os.system('urls_36.bat')
def urls_37():
os.system('urls_37.bat')
def urls_38():
os.system('urls_38.bat')
def urls_38():
os.system('urls_39.bat')
<|reserved_special_token_0|>
def urls_40():
os.system('urls_41.bat')
def urls_41():
os.system('urls_41.bat')
<|reserved_special_token_0|>
def urls_44():
os.system('urls_44.bat')
<|reserved_special_token_0|>
def urls_46():
os.system('urls_46.bat')
def urls_47():
os.system('urls_47.bat')
def urls_47():
os.system('urls_48.bat')
def urls_48():
os.system('urls_49.bat')
<|reserved_special_token_0|>
def urls_51():
os.system('urls_51.bat')
def urls_52():
os.system('urls_52.bat')
def urls_53():
os.system('urls_53.bat')
<|reserved_special_token_0|>
def urls_55():
os.system('urls_55.bat')
<|reserved_special_token_0|>
def urls_56():
os.system('urls_57.bat')
def urls_57():
os.system('urls_58.bat')
def urls_58():
os.system('urls_59.bat')
<|reserved_special_token_0|>
def urls_61():
os.system('urls_61.bat')
def urls_62():
os.system('urls_62.bat')
def urls_63():
os.system('urls_63.bat')
def urls_64():
os.system('urls_64.bat')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def urls():
os.system('urls.bat')
def urls_1():
os.system('urls_1.bat')
<|reserved_special_token_0|>
def urls_3():
os.system('urls_3.bat')
def urls_4():
os.system('urls_4.bat')
def urls_5():
os.system('urls_5.bat')
def urls_6():
os.system('urls_6.bat')
def urls_7():
os.system('urls_7.bat')
def urls_8():
os.system('urls_8.bat')
def urls_9():
os.system('urls_9.bat')
<|reserved_special_token_0|>
def urls_11():
os.system('urls_11.bat')
def urls_12():
os.system('urls_12.bat')
def urls_13():
os.system('urls_13.bat')
def urls_14():
os.system('urls_14.bat')
def urls_15():
os.system('urls_15.bat')
def urls_16():
os.system('urls_16.bat')
def urls_17():
os.system('urls_17.bat')
def urls_18():
os.system('urls_18.bat')
def urls_19():
os.system('urls_19.bat')
def urls_20():
os.system('urls_20.bat')
<|reserved_special_token_0|>
def urls_21():
os.system('urls_22.bat')
<|reserved_special_token_0|>
def urls_23():
os.system('urls_23.bat')
def urls_24():
os.system('urls_24.bat')
def urls_25():
os.system('urls_25.bat')
def urls_26():
os.system('urls_26.bat')
def urls_27():
os.system('urls_27.bat')
def urls_28():
os.system('urls_28.bat')
def urls_29():
os.system('urls_29.bat')
<|reserved_special_token_0|>
def urls_31():
os.system('urls_32.bat')
def urls_32():
os.system('urls_32.bat')
def urls_33():
os.system('urls_33.bat')
def urls_34():
os.system('urls_34.bat')
def urls_35():
os.system('urls_35.bat')
def urls_36():
os.system('urls_36.bat')
def urls_37():
os.system('urls_37.bat')
def urls_38():
os.system('urls_38.bat')
def urls_38():
os.system('urls_39.bat')
<|reserved_special_token_0|>
def urls_40():
os.system('urls_41.bat')
def urls_41():
os.system('urls_41.bat')
<|reserved_special_token_0|>
def urls_43():
os.system('urls_43.bat')
def urls_44():
os.system('urls_44.bat')
def urls_45():
os.system('urls_45.bat')
def urls_46():
os.system('urls_46.bat')
def urls_47():
os.system('urls_47.bat')
def urls_47():
os.system('urls_48.bat')
def urls_48():
os.system('urls_49.bat')
<|reserved_special_token_0|>
def urls_51():
os.system('urls_51.bat')
def urls_52():
os.system('urls_52.bat')
def urls_53():
os.system('urls_53.bat')
def urls_54():
os.system('urls_54.bat')
def urls_55():
os.system('urls_55.bat')
def urls_56():
os.system('urls_56.bat')
def urls_56():
os.system('urls_57.bat')
def urls_57():
os.system('urls_58.bat')
def urls_58():
os.system('urls_59.bat')
<|reserved_special_token_0|>
def urls_60():
os.system('urls_60.bat')
def urls_61():
os.system('urls_61.bat')
def urls_62():
os.system('urls_62.bat')
def urls_63():
os.system('urls_63.bat')
def urls_64():
os.system('urls_64.bat')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def urls():
os.system('urls.bat')
def urls_1():
os.system('urls_1.bat')
<|reserved_special_token_0|>
def urls_3():
os.system('urls_3.bat')
def urls_4():
os.system('urls_4.bat')
def urls_5():
os.system('urls_5.bat')
def urls_6():
os.system('urls_6.bat')
def urls_7():
os.system('urls_7.bat')
def urls_8():
os.system('urls_8.bat')
def urls_9():
os.system('urls_9.bat')
<|reserved_special_token_0|>
def urls_11():
os.system('urls_11.bat')
def urls_12():
os.system('urls_12.bat')
def urls_13():
os.system('urls_13.bat')
def urls_14():
os.system('urls_14.bat')
def urls_15():
os.system('urls_15.bat')
def urls_16():
os.system('urls_16.bat')
def urls_17():
os.system('urls_17.bat')
def urls_18():
os.system('urls_18.bat')
def urls_19():
os.system('urls_19.bat')
def urls_20():
os.system('urls_20.bat')
<|reserved_special_token_0|>
def urls_21():
os.system('urls_22.bat')
def urls_22():
os.system('urls_23.bat')
def urls_23():
os.system('urls_23.bat')
def urls_24():
os.system('urls_24.bat')
def urls_25():
os.system('urls_25.bat')
def urls_26():
os.system('urls_26.bat')
def urls_27():
os.system('urls_27.bat')
def urls_28():
os.system('urls_28.bat')
def urls_29():
os.system('urls_29.bat')
def urls_29():
os.system('urls_30.bat')
def urls_30():
os.system('urls_31.bat')
def urls_31():
os.system('urls_32.bat')
def urls_32():
os.system('urls_32.bat')
def urls_33():
os.system('urls_33.bat')
def urls_34():
os.system('urls_34.bat')
def urls_35():
os.system('urls_35.bat')
def urls_36():
os.system('urls_36.bat')
def urls_37():
os.system('urls_37.bat')
def urls_38():
os.system('urls_38.bat')
def urls_38():
os.system('urls_39.bat')
<|reserved_special_token_0|>
def urls_40():
os.system('urls_41.bat')
def urls_41():
os.system('urls_41.bat')
def urls_42():
os.system('urls_42.bat')
def urls_43():
os.system('urls_43.bat')
def urls_44():
os.system('urls_44.bat')
def urls_45():
os.system('urls_45.bat')
def urls_46():
os.system('urls_46.bat')
def urls_47():
os.system('urls_47.bat')
def urls_47():
os.system('urls_48.bat')
def urls_48():
os.system('urls_49.bat')
def urls_49():
os.system('urls_50.bat')
def urls_50():
os.system('urls_50.bat')
def urls_51():
os.system('urls_51.bat')
def urls_52():
os.system('urls_52.bat')
def urls_53():
os.system('urls_53.bat')
def urls_54():
os.system('urls_54.bat')
def urls_55():
os.system('urls_55.bat')
def urls_56():
os.system('urls_56.bat')
def urls_56():
os.system('urls_57.bat')
def urls_57():
os.system('urls_58.bat')
def urls_58():
os.system('urls_59.bat')
def urls_59():
os.system('urls_59.bat')
def urls_60():
os.system('urls_60.bat')
def urls_61():
os.system('urls_61.bat')
def urls_62():
os.system('urls_62.bat')
def urls_63():
os.system('urls_63.bat')
def urls_64():
os.system('urls_64.bat')
def urls_65():
os.system('urls_65.bat')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import os
import multiprocessing
import time
import psycopg2
#os.system("myproject1\\runscrapy2.py")
#from scrapy import cmdline
#os.system("scrapy crawl parts")
#cmdline.execute("cd myproject1".split())
#cmdline.execute("myproject1\\runscrapy.bat".split())
# start = time.perf_counter()
def _init_db():
    """(Re)create the ``aa_otoyedekcim`` results table in SCRAPY_DB.

    Drops any table left over from a previous run, then creates a fresh
    four-column TEXT table that the scrapy workers insert into.
    """
    connection = psycopg2.connect(
        host="localhost",
        user="postgres",
        database="SCRAPY_DB",
        password="yolo12",
    )
    try:
        cursor = connection.cursor()
        cursor.execute("DROP TABLE IF EXISTS aa_otoyedekcim")
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS aa_otoyedekcim "
            "(part_no TEXT, description TEXT, price TEXT, cur_url TEXT)"
        )
        connection.commit()
    finally:
        # Always release the connection, even if a statement fails.
        connection.close()


# Guarded: on Windows, multiprocessing's spawn start method re-imports this
# module in every child process. Unguarded, each child would DROP the table
# while the scrapers are inserting into it.
if __name__ == '__main__':
    _init_db()
def _run_batch_file(index):
    """Run the scraper batch file for *index*: 'urls.bat' for 0, else 'urls_<index>.bat'."""
    name = 'urls.bat' if index == 0 else f'urls_{index}.bat'
    os.system(name)


def _make_worker(index):
    """Build the module-level worker function for *index*.

    The returned function takes no arguments and runs the matching batch
    file. Its __name__/__qualname__ are set so multiprocessing can pickle
    it by reference (module + qualified name) under spawn.
    """
    def worker():
        _run_batch_file(index)
    worker.__name__ = 'urls' if index == 0 else f'urls_{index}'
    worker.__qualname__ = worker.__name__
    return worker


# The original file hand-wrote 66 near-identical defs, with several names
# defined twice (urls_20, urls_29, urls_38, urls_47, urls_56 — the second
# definition silently shadowing the first) and off-by-one batch filenames
# (e.g. urls_21 ran urls_22.bat, urls_49 ran urls_50.bat), so some .bat
# files ran twice and others never ran at all. Generate the workers instead
# so that urls_N always runs urls_N.bat, exactly once per name.
for _i in range(66):
    _worker = _make_worker(_i)
    globals()[_worker.__name__] = _worker
# One Process per batch-file worker. The hand-written original assigned u16
# twice (first Process(target=urls_15), then Process(target=urls_16)), so
# urls_15 never got a process and only 65 distinct uN variables covered the
# 66 workers. Generate u1..u66 so that uK always wraps the (K-1)-th worker
# (u1 -> urls, u2 -> urls_1, ..., u66 -> urls_65).
_worker_functions = [urls] + [globals()[f'urls_{_n}'] for _n in range(1, 66)]
for _pos, _target in enumerate(_worker_functions, start=1):
    globals()[f'u{_pos}'] = multiprocessing.Process(target=_target)
if __name__ == '__main__':
    # First batch: 22 scraper processes. Start them all, wait for every one
    # to finish, then hand off to the post-processing batch script.
    first_batch = [
        u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11,
        u12, u13, u14, u15, u16, u17, u18, u19, u20, u21, u22,
    ]
    for process in first_batch:
        process.start()
    for process in first_batch:
        process.join()

    os.system('1_runmebaby_3.bat')
'''
if __name__ == '__main__':
u23.start()
u24.start()
u25.start()
u26.start()
u27.start()
u28.start()
u29.start()
u30.start()
u31.start()
u32.start()
u33.start()
u34.start()
u35.start()
u36.start()
u37.start()
u38.start()
u39.start()
u40.start()
u41.start()
u42.start()
u43.start()
u44.start()
u23.join()
u24.join()
u25.join()
u26.join()
u27.join()
u28.join()
u29.join()
u30.join()
u31.join()
u32.join()
u33.join()
u34.join()
u35.join()
u36.join()
u37.join()
u38.join()
u39.join()
u40.join()
u41.join()
u42.join()
u43.join()
u44.join()
if __name__ == '__main__':
u45.start()
u46.start()
u47.start()
u48.start()
u49.start()
u50.start()
u51.start()
u52.start()
u53.start()
u54.start()
u55.start()
u56.start()
u57.start()
u58.start()
u59.start()
u60.start()
u61.start()
u62.start()
u63.start()
u64.start()
u65.start()
u45.join()
u46.join()
u47.join()
u48.join()
u49.join()
u50.join()
u51.join()
u52.join()
u53.join()
u54.join()
u55.join()
u56.join()
u57.join()
u58.join()
u59.join()
u60.join()
u61.join()
u62.join()
u63.join()
u64.join()
u65.join()
# finish = time.perf_counter()
# print(f'Successfully finished in {round((finish-start)/60, 2)} minute(s) bro')
'''
|
flexible
|
{
"blob_id": "8e0d729fa55aabede123d89a507296b7d8a45c8b",
"index": 1705,
"step-1": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\n<mask token>\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\n<mask token>\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\n<mask token>\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\n<mask token>\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\n<mask token>\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\n<mask token>\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\n<mask token>\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\n<mask token>\n\n\ndef urls_44():\n os.system('urls_44.bat')\n\n\n<mask token>\n\n\ndef urls_46():\n os.system('urls_46.bat')\n\n\n<mask token>\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\n<mask token>\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\n<mask token>\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\n<mask token>\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\n<mask token>\n\n\ndef urls_61():\n 
os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\ndef urls_9():\n os.system('urls_9.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\ndef urls_12():\n os.system('urls_12.bat')\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\ndef urls_16():\n os.system('urls_16.bat')\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\ndef urls_20():\n os.system('urls_20.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\n<mask token>\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\ndef urls_25():\n os.system('urls_25.bat')\n\n\ndef urls_26():\n os.system('urls_26.bat')\n\n\ndef urls_27():\n os.system('urls_27.bat')\n\n\n<mask token>\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\n<mask token>\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\ndef urls_34():\n os.system('urls_34.bat')\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\ndef urls_38():\n os.system('urls_38.bat')\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\ndef urls_41():\n os.system('urls_41.bat')\n\n\n<mask token>\n\n\ndef urls_44():\n os.system('urls_44.bat')\n\n\n<mask token>\n\n\ndef urls_46():\n 
os.system('urls_46.bat')\n\n\ndef urls_47():\n os.system('urls_47.bat')\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\n<mask token>\n\n\ndef urls_51():\n os.system('urls_51.bat')\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\ndef urls_53():\n os.system('urls_53.bat')\n\n\n<mask token>\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\n<mask token>\n\n\ndef urls_56():\n os.system('urls_57.bat')\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\n<mask token>\n\n\ndef urls_61():\n os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\ndef urls_9():\n os.system('urls_9.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\ndef urls_12():\n os.system('urls_12.bat')\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\ndef urls_16():\n os.system('urls_16.bat')\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\ndef urls_20():\n os.system('urls_20.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\n<mask token>\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\ndef urls_25():\n os.system('urls_25.bat')\n\n\ndef urls_26():\n os.system('urls_26.bat')\n\n\ndef urls_27():\n os.system('urls_27.bat')\n\n\ndef urls_28():\n os.system('urls_28.bat')\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\n<mask token>\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\ndef urls_34():\n os.system('urls_34.bat')\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\ndef urls_38():\n os.system('urls_38.bat')\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\ndef urls_41():\n os.system('urls_41.bat')\n\n\n<mask token>\n\n\ndef urls_43():\n os.system('urls_43.bat')\n\n\ndef urls_44():\n 
os.system('urls_44.bat')\n\n\ndef urls_45():\n os.system('urls_45.bat')\n\n\ndef urls_46():\n os.system('urls_46.bat')\n\n\ndef urls_47():\n os.system('urls_47.bat')\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\n<mask token>\n\n\ndef urls_51():\n os.system('urls_51.bat')\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\ndef urls_53():\n os.system('urls_53.bat')\n\n\ndef urls_54():\n os.system('urls_54.bat')\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\ndef urls_56():\n os.system('urls_56.bat')\n\n\ndef urls_56():\n os.system('urls_57.bat')\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\n<mask token>\n\n\ndef urls_60():\n os.system('urls_60.bat')\n\n\ndef urls_61():\n os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\ndef urls_9():\n os.system('urls_9.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\ndef urls_12():\n os.system('urls_12.bat')\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\ndef urls_16():\n os.system('urls_16.bat')\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\ndef urls_20():\n os.system('urls_20.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\ndef urls_22():\n os.system('urls_23.bat')\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\ndef urls_25():\n os.system('urls_25.bat')\n\n\ndef urls_26():\n os.system('urls_26.bat')\n\n\ndef urls_27():\n os.system('urls_27.bat')\n\n\ndef urls_28():\n os.system('urls_28.bat')\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\ndef urls_29():\n os.system('urls_30.bat')\n\n\ndef urls_30():\n os.system('urls_31.bat')\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\ndef urls_34():\n os.system('urls_34.bat')\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\ndef urls_38():\n os.system('urls_38.bat')\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\ndef urls_41():\n 
os.system('urls_41.bat')\n\n\ndef urls_42():\n os.system('urls_42.bat')\n\n\ndef urls_43():\n os.system('urls_43.bat')\n\n\ndef urls_44():\n os.system('urls_44.bat')\n\n\ndef urls_45():\n os.system('urls_45.bat')\n\n\ndef urls_46():\n os.system('urls_46.bat')\n\n\ndef urls_47():\n os.system('urls_47.bat')\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\ndef urls_49():\n os.system('urls_50.bat')\n\n\ndef urls_50():\n os.system('urls_50.bat')\n\n\ndef urls_51():\n os.system('urls_51.bat')\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\ndef urls_53():\n os.system('urls_53.bat')\n\n\ndef urls_54():\n os.system('urls_54.bat')\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\ndef urls_56():\n os.system('urls_56.bat')\n\n\ndef urls_56():\n os.system('urls_57.bat')\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\ndef urls_59():\n os.system('urls_59.bat')\n\n\ndef urls_60():\n os.system('urls_60.bat')\n\n\ndef urls_61():\n os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\ndef urls_65():\n os.system('urls_65.bat')\n\n\n<mask token>\n",
"step-5": "import os\nimport multiprocessing\nimport time\nimport psycopg2\n#os.system(\"myproject1\\\\runscrapy2.py\")\n\n#from scrapy import cmdline\n#os.system(\"scrapy crawl parts\")\n#cmdline.execute(\"cd myproject1\".split())\n#cmdline.execute(\"myproject1\\\\runscrapy.bat\".split())\n\n# start = time.perf_counter()\n\n\n\nconnection = psycopg2.connect(\n host=\"localhost\",\n user=\"postgres\",\n database=\"SCRAPY_DB\",\n password=\"yolo12\",\n)\ncursor = connection.cursor()\n\n\ncursor.execute(\"DROP TABLE IF EXISTS aa_otoyedekcim\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS aa_otoyedekcim (part_no TEXT, description TEXT, price TEXT, cur_url TEXT)\")\nconnection.commit()\nconnection.close()\n\n\n\n\n\ndef urls():\n os.system('urls.bat')\n\ndef urls_1():\n os.system('urls_1.bat')\n\ndef urls_2():\n os.system('urls_2.bat')\n\ndef urls_3():\n os.system('urls_3.bat')\n\ndef urls_4():\n os.system('urls_4.bat')\n\ndef urls_5():\n os.system('urls_5.bat')\n\ndef urls_6():\n os.system('urls_6.bat')\n\ndef urls_7():\n os.system('urls_7.bat')\n\ndef urls_8():\n os.system('urls_8.bat')\n\ndef urls_9():\n os.system('urls_9.bat')\n\ndef urls_10():\n os.system('urls_10.bat')\n\ndef urls_11():\n os.system('urls_11.bat')\n\ndef urls_12():\n os.system('urls_12.bat')\n\ndef urls_13():\n os.system('urls_13.bat')\n\ndef urls_14():\n os.system('urls_14.bat')\n\ndef urls_15():\n os.system('urls_15.bat')\n\ndef urls_16():\n os.system('urls_16.bat')\n\ndef urls_17():\n os.system('urls_17.bat')\n\ndef urls_18():\n os.system('urls_18.bat')\n\ndef urls_19():\n os.system('urls_19.bat')\n\ndef urls_20():\n os.system('urls_20.bat')\n\ndef urls_20():\n os.system('urls_21.bat')\n\ndef urls_21():\n os.system('urls_22.bat')\n\ndef urls_22():\n os.system('urls_23.bat')\n\ndef urls_23():\n os.system('urls_23.bat')\n\ndef urls_24():\n os.system('urls_24.bat')\n\ndef urls_25():\n os.system('urls_25.bat')\n\ndef urls_26():\n os.system('urls_26.bat')\n\ndef urls_27():\n 
os.system('urls_27.bat')\n\ndef urls_28():\n os.system('urls_28.bat')\n\ndef urls_29():\n os.system('urls_29.bat')\n\ndef urls_29():\n os.system('urls_30.bat')\n\ndef urls_30():\n os.system('urls_31.bat')\n\ndef urls_31():\n os.system('urls_32.bat')\n\ndef urls_32():\n os.system('urls_32.bat')\n\ndef urls_33():\n os.system('urls_33.bat')\n\ndef urls_34():\n os.system('urls_34.bat')\n\ndef urls_35():\n os.system('urls_35.bat')\n\ndef urls_36():\n os.system('urls_36.bat')\n\ndef urls_37():\n os.system('urls_37.bat')\n\ndef urls_38():\n os.system('urls_38.bat')\n\ndef urls_38():\n os.system('urls_39.bat')\n\ndef urls_39():\n os.system('urls_40.bat')\n\ndef urls_40():\n os.system('urls_41.bat')\n\ndef urls_41():\n os.system('urls_41.bat')\n\ndef urls_42():\n os.system('urls_42.bat')\n\ndef urls_43():\n os.system('urls_43.bat')\n\ndef urls_44():\n os.system('urls_44.bat')\n\ndef urls_45():\n os.system('urls_45.bat')\n\ndef urls_46():\n os.system('urls_46.bat')\n\ndef urls_47():\n os.system('urls_47.bat')\n\ndef urls_47():\n os.system('urls_48.bat')\n\ndef urls_48():\n os.system('urls_49.bat')\n\ndef urls_49():\n os.system('urls_50.bat')\n\ndef urls_50():\n os.system('urls_50.bat')\n\ndef urls_51():\n os.system('urls_51.bat')\n\ndef urls_52():\n os.system('urls_52.bat')\n\ndef urls_53():\n os.system('urls_53.bat')\n\ndef urls_54():\n os.system('urls_54.bat')\n\ndef urls_55():\n os.system('urls_55.bat')\n\ndef urls_56():\n os.system('urls_56.bat')\n\ndef urls_56():\n os.system('urls_57.bat')\n\ndef urls_57():\n os.system('urls_58.bat')\n\ndef urls_58():\n os.system('urls_59.bat')\n\ndef urls_59():\n os.system('urls_59.bat')\n\ndef urls_60():\n os.system('urls_60.bat')\n\ndef urls_61():\n os.system('urls_61.bat')\n\ndef urls_62():\n os.system('urls_62.bat')\n\ndef urls_63():\n os.system('urls_63.bat')\n\ndef urls_64():\n os.system('urls_64.bat')\n\ndef urls_65():\n os.system('urls_65.bat')\n\n\n\n\n\nu1 = multiprocessing.Process(target=urls)\nu2 = 
multiprocessing.Process(target=urls_1)\nu3 = multiprocessing.Process(target=urls_2)\nu4 = multiprocessing.Process(target=urls_3)\nu5 = multiprocessing.Process(target=urls_4)\nu6 = multiprocessing.Process(target=urls_5)\nu7 = multiprocessing.Process(target=urls_6)\nu8 = multiprocessing.Process(target=urls_7)\nu9 = multiprocessing.Process(target=urls_8)\nu10 = multiprocessing.Process(target=urls_9)\nu11 = multiprocessing.Process(target=urls_10)\nu12 = multiprocessing.Process(target=urls_11)\nu13 = multiprocessing.Process(target=urls_12)\nu14 = multiprocessing.Process(target=urls_13)\nu15 = multiprocessing.Process(target=urls_14)\nu16 = multiprocessing.Process(target=urls_15)\nu16 = multiprocessing.Process(target=urls_16)\nu17 = multiprocessing.Process(target=urls_17)\nu18 = multiprocessing.Process(target=urls_18)\nu19 = multiprocessing.Process(target=urls_19)\nu20 = multiprocessing.Process(target=urls_20)\nu21 = multiprocessing.Process(target=urls_21)\nu22 = multiprocessing.Process(target=urls_22)\nu23 = multiprocessing.Process(target=urls_23)\nu24 = multiprocessing.Process(target=urls_24)\nu25 = multiprocessing.Process(target=urls_25)\nu26 = multiprocessing.Process(target=urls_26)\nu27 = multiprocessing.Process(target=urls_27)\nu28 = multiprocessing.Process(target=urls_28)\nu29 = multiprocessing.Process(target=urls_29)\nu30 = multiprocessing.Process(target=urls_30)\nu31 = multiprocessing.Process(target=urls_31)\nu32 = multiprocessing.Process(target=urls_32)\nu33 = multiprocessing.Process(target=urls_33)\nu34 = multiprocessing.Process(target=urls_34)\nu35 = multiprocessing.Process(target=urls_35)\nu36 = multiprocessing.Process(target=urls_36)\nu37 = multiprocessing.Process(target=urls_37)\nu38 = multiprocessing.Process(target=urls_38)\nu39 = multiprocessing.Process(target=urls_39)\nu40 = multiprocessing.Process(target=urls_40)\nu41 = multiprocessing.Process(target=urls_41)\nu42 = multiprocessing.Process(target=urls_42)\nu43 = 
multiprocessing.Process(target=urls_43)\nu44 = multiprocessing.Process(target=urls_44)\nu45 = multiprocessing.Process(target=urls_45)\nu46 = multiprocessing.Process(target=urls_46)\nu47 = multiprocessing.Process(target=urls_47)\nu48 = multiprocessing.Process(target=urls_48)\nu49 = multiprocessing.Process(target=urls_49)\nu50 = multiprocessing.Process(target=urls_50)\nu51 = multiprocessing.Process(target=urls_51)\nu52 = multiprocessing.Process(target=urls_52)\nu53 = multiprocessing.Process(target=urls_53)\nu54 = multiprocessing.Process(target=urls_54)\nu55 = multiprocessing.Process(target=urls_55)\nu56 = multiprocessing.Process(target=urls_56)\nu57 = multiprocessing.Process(target=urls_57)\nu58 = multiprocessing.Process(target=urls_58)\nu59 = multiprocessing.Process(target=urls_59)\nu60 = multiprocessing.Process(target=urls_60)\nu61 = multiprocessing.Process(target=urls_61)\nu62 = multiprocessing.Process(target=urls_62)\nu63 = multiprocessing.Process(target=urls_63)\nu64 = multiprocessing.Process(target=urls_64)\nu65 = multiprocessing.Process(target=urls_65)\n\n\nif __name__ == '__main__':\n u1.start()\n u2.start()\n u3.start()\n u4.start()\n u5.start()\n u6.start()\n u7.start()\n u8.start()\n u9.start()\n u10.start()\n u11.start()\n u12.start()\n u13.start()\n u14.start()\n u15.start()\n u16.start()\n u17.start()\n u18.start()\n u19.start()\n u20.start()\n u21.start()\n u22.start()\n\n u1.join()\n u2.join()\n u3.join()\n u4.join()\n u5.join()\n u6.join()\n u7.join()\n u8.join()\n u9.join()\n u10.join()\n u11.join()\n u12.join()\n u13.join()\n u14.join()\n u15.join()\n u16.join()\n u17.join()\n u18.join()\n u19.join()\n u20.join()\n u21.join()\n u22.join()\n\n os.system('1_runmebaby_3.bat')\n\n'''\nif __name__ == '__main__':\n u23.start()\n u24.start()\n u25.start()\n u26.start()\n u27.start()\n u28.start()\n u29.start()\n u30.start()\n u31.start()\n u32.start()\n u33.start()\n u34.start()\n u35.start()\n u36.start()\n u37.start()\n u38.start()\n u39.start()\n 
u40.start()\n u41.start()\n u42.start()\n u43.start()\n u44.start()\n\n u23.join()\n u24.join()\n u25.join()\n u26.join()\n u27.join()\n u28.join()\n u29.join()\n u30.join()\n u31.join()\n u32.join()\n u33.join()\n u34.join()\n u35.join()\n u36.join()\n u37.join()\n u38.join()\n u39.join()\n u40.join()\n u41.join()\n u42.join()\n u43.join()\n u44.join()\n\n\n\nif __name__ == '__main__':\n u45.start()\n u46.start()\n u47.start()\n u48.start()\n u49.start()\n u50.start()\n u51.start()\n u52.start()\n u53.start()\n u54.start()\n u55.start()\n u56.start()\n u57.start()\n u58.start()\n u59.start()\n u60.start()\n u61.start()\n u62.start()\n u63.start()\n u64.start()\n u65.start()\n\n u45.join()\n u46.join()\n u47.join()\n u48.join()\n u49.join()\n u50.join()\n u51.join()\n u52.join()\n u53.join()\n u54.join()\n u55.join()\n u56.join()\n u57.join()\n u58.join()\n u59.join()\n u60.join()\n u61.join()\n u62.join()\n u63.join()\n u64.join()\n u65.join()\n\n # finish = time.perf_counter()\n # print(f'Successfully finished in {round((finish-start)/60, 2)} minute(s) bro')\n\n'''",
"step-ids": [
39,
53,
59,
67,
75
]
}
|
[
39,
53,
59,
67,
75
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test():
T = [1, 2, 3, 1, 0, 4]
res = find_days(T)
assert res == [1, 1, 3, 2, 1, 0]
<|reserved_special_token_1|>
from solution import find_days
import pudb
def test():
T = [1, 2, 3, 1, 0, 4]
res = find_days(T)
assert res == [1, 1, 3, 2, 1, 0]
<|reserved_special_token_1|>
from solution import find_days
import pudb
def test():
T = [1, 2, 3, 1, 0, 4]
# pudb.set_trace()
res = find_days(T)
assert res == [1, 1, 3, 2, 1, 0]
|
flexible
|
{
"blob_id": "db36c82717aa0bacffce7a3e2724ed2bb586c7fb",
"index": 7862,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test():\n T = [1, 2, 3, 1, 0, 4]\n res = find_days(T)\n assert res == [1, 1, 3, 2, 1, 0]\n",
"step-3": "from solution import find_days\nimport pudb\n\n\ndef test():\n T = [1, 2, 3, 1, 0, 4]\n res = find_days(T)\n assert res == [1, 1, 3, 2, 1, 0]\n",
"step-4": "from solution import find_days\nimport pudb\n\n\ndef test():\n T = [1, 2, 3, 1, 0, 4]\n # pudb.set_trace()\n res = find_days(T)\n assert res == [1, 1, 3, 2, 1, 0]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
The MIT License (MIT)
Copyright (c) 2015 Tommy Carpenter
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
"""
Warning; here be dragons. Documentation needed.
No try excepts here unless the MR job can complete without them!
Fail fast and have the exception stack show up in the Hadoop interface logs
"""
def _filtering_parsing_helper(filter_cols_key, filter_vals_key, filter_invert_key):
filter_vals = os.environ[filter_vals_key].split("|")
inverts = [int(y) for y in os.environ[filter_invert_key].split("|")]
filter_dict = {}
for xindex, x in enumerate([int(y) for y in os.environ[filter_cols_key].split("|")]):
filter_dict[x] = {}
filter_dict[x]["filter_vals"] = filter_vals[xindex].split(",")
filter_dict[x]["invert"] = inverts[xindex]
return filter_dict
def _filtering_passed_helper(filter_dict, vals):
yield_row = True
for filter_col in filter_dict.keys():
if (filter_dict[filter_col]["invert"] and vals[filter_col] in filter_dict[filter_col]["filter_vals"]) or (not filter_dict[filter_col]["invert"] and vals[filter_col] not in filter_dict[filter_col]["filter_vals"]):
yield_row = False
break
return yield_row
def _kv_helper(cache, value):
"""shared code between select_where and select_join
splits vals, see if filtering passes, forms the key from key_columns and forms the values from target_columns
"""
vals = [v.replace('"','') for v in value.split(cache["delimiter"])]
if "filtering" not in cache or _filtering_passed_helper(cache["filtering"], vals): #yield if filtering criteria met or no filtering criteria
k = "+".join(vals) if cache["key_columns"] == "*" else "+".join(vals[l] for l in cache["key_columns"])
v = ",".join(vals) if cache["target_columns"] == "*" else ",".join([vals[l] for l in cache["target_columns"]])
return k, v
return None, None
def identity_mapper(key, value):
    """Pass-through mapper: emit the (key, value) pair unchanged.

    Used when all of the job's real work happens in the reduce phase.
    """
    yield (key, value)
def token_count_mapper(key, value):
    """Emit (token, 1) for every whitespace-separated token in the line.

    Combined with a basic count reducer this implements wordcount.

    Args:
        key: byte offset from the input format; unused.
        value: one line of input text.

    Yields:
        (token, 1) tuples, one per token, in order of appearance.
    """
    tokens = value.split()
    for tok in tokens:
        yield tok, 1
def select_where(key, value, cache={}):
    """Mapper implementing a SELECT ... WHERE ... over delimited text rows.

    With an identity reducer this behaves like:
        SELECT key_columns, target_columns FROM input
        GROUP BY key_columns
        WHERE filter_column_i (NOT) IN filter_vals_i AND ...
    With a count reducer the value side becomes count(*).

    Configuration comes from jobconf-provided environment variables:
        target_columns (MANDATORY): "*" (all fields) or a comma-delimited
            list of column indices, e.g. "1,2,3".
        key_columns (MANDATORY): same format as target_columns.
        delimiter: the field delimiter the input rows are split on.
        filter_columns / filter_vals / invert_filter_vals (OPTIONAL — all
            three must be present for a WHERE clause to apply): parallel
            pipe-delimited lists; entry i of filter_vals is a
            comma-delimited value list for filter column i, and an invert
            flag of 1 flips the test to "NOT IN".  Example:
                filter_columns=1|2, filter_vals=a,b|c,d,
                invert_filter_vals=0|1
            selects rows where col[1] in {a,b} and col[2] not in {c,d}.
            Listing the same column twice with flags 0 then 1 gives an
            "in X but not in Y" effect.

    Args:
        key: byte offset from the input format; unused.
        value: one delimited line of input.
        cache: mutable default used deliberately as a per-process memo so
            the environment is parsed once, not once per record.

    Yields:
        (k, v) where k "+"-joins the key columns and v ","-joins the
        target columns, for each row passing the WHERE clause.
    """
    filter_conf = ("filter_columns", "filter_vals", "invert_filter_vals")
    if "filtering" not in cache and all(name in os.environ for name in filter_conf):
        cache["filtering"] = _filtering_parsing_helper(*filter_conf)
    if "delimiter" not in cache:
        cache["delimiter"] = os.environ["delimiter"]
    # target_columns and key_columns share the same "*"-or-int-list format.
    for conf_name in ("target_columns", "key_columns"):
        if conf_name not in cache:
            raw = os.environ[conf_name]
            cache[conf_name] = raw if raw == "*" else [int(col) for col in raw.split(",")]
    k, v = _kv_helper(cache, value)
    if k and v:
        yield k, v
def join_mapper(key, value, cache={}):
    """Mapper side of a two-table INNER JOIN ("table" = all files under
    one HDFS root directory).

    Emits (key_1+key_2+..., {"table": <path>, "row": <selected columns>})
    so a join reducer can pair rows that share the same join key.  Run on
    tables I_1 and I_2 whose shared keys live in (possibly different)
    columns, with join_inner_reducer this implements:
        SELECT I_1.cols, I_2.cols
        FROM I_1 INNER JOIN I_2 ON I_1.keys = I_2.keys
        WHERE <optional per-table filters>

    Configuration via jobconf environment variables, one set per table
    (prefixes table_1_ / table_2_):
        table_N_path (MANDATORY): comma-separated HDFS path(s); matched
            against the current input split to decide which table's
            settings apply.
        table_N_key_columns (MANDATORY): comma-delimited join-key columns.
        table_N_delimiter (MANDATORY): field delimiter for that table.
        table_N_target_columns (MANDATORY): "*" or comma-delimited columns.
        table_N_filter_columns / table_N_filter_vals /
        table_N_invert_filter_vals (OPTIONAL, all three together): WHERE
            clause in the same format documented on select_where.

    Args:
        key: byte offset from the input format; unused.
        value: one delimited line of input.
        cache: mutable default used deliberately as a per-process memo.

    Yields:
        (join_key, {"table": table_path, "row": value_string}) for each
        row passing its table's filter.
    """
    current_input = os.environ["mapreduce_map_input_file"]
    # Decide which table this split belongs to by path prefix.
    # NOTE(review): known limitation — plain startswith matching does not
    # handle paths containing wildcards; that would need regex logic.
    prefix = None
    for path in os.environ["table_1_path"].split(","):
        if current_input.startswith(path):
            prefix = "1"
            break
    if not prefix:
        for path in os.environ["table_2_path"].split(","):
            if current_input.startswith(path):
                prefix = "2"
                break
    if not prefix:
        raise Exception("Bug: File {0} matches neither input path 1 ({1}) or input path 2 ({2})".format(current_input, os.environ["table_1_path"], os.environ["table_2_path"]))

    def conf(suffix):
        # Build the per-table jobconf variable name, e.g. "table_1_delimiter".
        return "table_{0}_{1}".format(prefix, suffix)

    table = os.environ[conf("path")]
    filter_names = (conf("filter_columns"), conf("filter_vals"), conf("invert_filter_vals"))
    if "filtering" not in cache and all(name in os.environ for name in filter_names):
        cache["filtering"] = _filtering_parsing_helper(*filter_names)
    if "key_columns" not in cache:
        cache["key_columns"] = [int(col) for col in os.environ[conf("key_columns")].split(",")]
    if "target_columns" not in cache:
        raw = os.environ[conf("target_columns")]
        cache["target_columns"] = raw if raw == "*" else [int(col) for col in raw.split(",")]
    if "delimiter" not in cache:
        cache["delimiter"] = os.environ[conf("delimiter")]
    k, v = _kv_helper(cache, value)
    if k and v:
        yield k, {"table": table, "row": v}
|
normal
|
{
"blob_id": "da0076ab18531e5b8a1de909cb9178de6327d6b0",
"index": 3440,
"step-1": "<mask token>\n\n\ndef _filtering_parsing_helper(filter_cols_key, filter_vals_key,\n filter_invert_key):\n filter_vals = os.environ[filter_vals_key].split('|')\n inverts = [int(y) for y in os.environ[filter_invert_key].split('|')]\n filter_dict = {}\n for xindex, x in enumerate([int(y) for y in os.environ[filter_cols_key]\n .split('|')]):\n filter_dict[x] = {}\n filter_dict[x]['filter_vals'] = filter_vals[xindex].split(',')\n filter_dict[x]['invert'] = inverts[xindex]\n return filter_dict\n\n\n<mask token>\n\n\ndef _kv_helper(cache, value):\n \"\"\"shared code between select_where and select_join\n \n splits vals, see if filtering passes, forms the key from key_columns and forms the values from target_columns\n \"\"\"\n vals = [v.replace('\"', '') for v in value.split(cache['delimiter'])]\n if 'filtering' not in cache or _filtering_passed_helper(cache[\n 'filtering'], vals):\n k = '+'.join(vals) if cache['key_columns'] == '*' else '+'.join(\n vals[l] for l in cache['key_columns'])\n v = ','.join(vals) if cache['target_columns'] == '*' else ','.join([\n vals[l] for l in cache['target_columns']])\n return k, v\n return None, None\n\n\ndef identity_mapper(key, value):\n \"\"\" Does Nothing; used when all work done in reduce phase \n \"\"\"\n yield key, value\n\n\ndef token_count_mapper(key, value):\n \"\"\" Purpose:\n Splits lines of each file by whitespace (value holds lines), and emits (token, 1) for each token in the line.\n When combined with a basic count reducer, implements wordcount.\n \n Args:\n key: byte offset (not used in this function)\n value: (string)\n Yields:\n A series of tuples of the form (key, 1) \n \"\"\"\n for token in value.split():\n yield token, 1\n\n\n<mask token>\n\n\ndef join_mapper(key, value, cache={}):\n \"\"\"\"table\" refers to all files in one HDFS root directory below:\n \n PURPOSE: \n Very similar to \"select_where_mapper\" except the key output is different. \n Outputs (key_1+key_2,..., value) where key_1, key_2,... 
are given by the columns specified in key_columns. \n \n When run on tables I_1, I_2 that share keys \"1,2,3\", \n where I_1 has the shared keys in columns A,B,C \n and I_2 has they shared keys in columns D,C,E,\n then run with join_inner_reducer, this implements:\n SELECT I_1.COL_a, I_1.COL_b,.. I_2.COL_a, I_2.COL_b,\n FROM I_1 INNER JOIN I_2 \n ON I_1.key1 = I_2.key1, I_1.key=I_2.key2,...\n WHERE I_1.filter_column_1 (not) in filter_vals_1_for_I_1, I_1.filter_column_2 (not) in filter_vals_2_for_I_1, ...\n and I_2.filter_column_1 (not) in filter_vals_1_for_I_2, I_2.filter_column_2 (not) in filter_vals_2_for_I_2, ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - table_1_path='...' string representing the HDFS path(s) (if multiple, should be a string with commas between the individual paths) of the files of \"table 1\". used to parse out the key columns from this table when table 2 has the same keys but in different columns\n via jobconfs (MANDATORY) - table_2_path='...' 
\" \"\n via jobconfs (MANDATORY) - table_1_key_columns=1,2,3': comma delimited list of ints as a string like \"1,2,3\"\n via jobconfs (MANDATORY) - table_2_key_columns=1,2,3': \" \"\n via jobconfs (MANDATORY) - table_1_delimiter: the delimter the file 1 is split on\n via jobconfs (MANDATORY) - table_2_delimiter: \" \"\n via jobconfs (MANDATORY) - table_1_target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n via jobconfs (MANDATORY) - table_2_target_columns: \" \" \n \n For example and usage of the following 6 parameters, see the docstring of select_where;\n they are the same except duplicated for \"table_1\" and table_2\"\n \n via jobconfs (OPTIONAL) - table_1_filter_columns\n via jobconfs (OPTIONAL) - table_1_filter_vals\n via jobconfs (OPTIONAL) - table_1_invert_filter_vals\n via jobconfs (OPTIONAL) - table_2_filter_columns\n via jobconfs (OPTIONAL) - table_2_filter_vals\n via jobconfs (OPTIONAL) - table_2_invert_filter_vals\n Yields:\n a subset of the (key_1+key_2+..., value) for each input pair\n \"\"\"\n PREFIX = None\n INPUT = os.environ['mapreduce_map_input_file']\n for t1p in os.environ['table_1_path'].split(','):\n if INPUT.startswith(t1p):\n PREFIX = '1'\n break\n if not PREFIX:\n for t2p in os.environ['table_2_path'].split(','):\n if INPUT.startswith(t2p):\n PREFIX = '2'\n break\n if not PREFIX:\n raise Exception(\n 'Bug: File {0} matches neither input path 1 ({1}) or input path 2 ({2})'\n .format(INPUT, os.environ['table_1_path'], os.environ[\n 'table_2_path']))\n TABLE = os.environ['table_{0}_path'.format(PREFIX)]\n if not 'filtering' in cache and 'table_{0}_filter_columns'.format(PREFIX\n ) in os.environ and 'table_{0}_filter_vals'.format(PREFIX\n ) in os.environ and 'table_{0}_invert_filter_vals'.format(PREFIX\n ) in os.environ:\n cache['filtering'] = _filtering_parsing_helper(\n 'table_{0}_filter_columns'.format(PREFIX),\n 'table_{0}_filter_vals'.format(PREFIX),\n 
'table_{0}_invert_filter_vals'.format(PREFIX))\n if not 'key_columns' in cache:\n cache['key_columns'] = [int(x) for x in os.environ[\n 'table_{0}_key_columns'.format(PREFIX)].split(',')]\n if not 'target_columns' in cache:\n if os.environ['table_{0}_target_columns'.format(PREFIX)] == '*':\n cache['target_columns'] = '*'\n else:\n cache['target_columns'] = [int(x) for x in os.environ[\n 'table_{0}_target_columns'.format(PREFIX)].split(',')]\n if not 'delimiter' in cache:\n cache['delimiter'] = os.environ['table_{0}_delimiter'.format(PREFIX)]\n k, v = _kv_helper(cache, value)\n if k and v:\n outdict = {}\n outdict['table'] = TABLE\n outdict['row'] = v\n yield k, outdict\n",
"step-2": "<mask token>\n\n\ndef _filtering_parsing_helper(filter_cols_key, filter_vals_key,\n filter_invert_key):\n filter_vals = os.environ[filter_vals_key].split('|')\n inverts = [int(y) for y in os.environ[filter_invert_key].split('|')]\n filter_dict = {}\n for xindex, x in enumerate([int(y) for y in os.environ[filter_cols_key]\n .split('|')]):\n filter_dict[x] = {}\n filter_dict[x]['filter_vals'] = filter_vals[xindex].split(',')\n filter_dict[x]['invert'] = inverts[xindex]\n return filter_dict\n\n\ndef _filtering_passed_helper(filter_dict, vals):\n yield_row = True\n for filter_col in filter_dict.keys():\n if filter_dict[filter_col]['invert'] and vals[filter_col\n ] in filter_dict[filter_col]['filter_vals'] or not filter_dict[\n filter_col]['invert'] and vals[filter_col] not in filter_dict[\n filter_col]['filter_vals']:\n yield_row = False\n break\n return yield_row\n\n\ndef _kv_helper(cache, value):\n \"\"\"shared code between select_where and select_join\n \n splits vals, see if filtering passes, forms the key from key_columns and forms the values from target_columns\n \"\"\"\n vals = [v.replace('\"', '') for v in value.split(cache['delimiter'])]\n if 'filtering' not in cache or _filtering_passed_helper(cache[\n 'filtering'], vals):\n k = '+'.join(vals) if cache['key_columns'] == '*' else '+'.join(\n vals[l] for l in cache['key_columns'])\n v = ','.join(vals) if cache['target_columns'] == '*' else ','.join([\n vals[l] for l in cache['target_columns']])\n return k, v\n return None, None\n\n\ndef identity_mapper(key, value):\n \"\"\" Does Nothing; used when all work done in reduce phase \n \"\"\"\n yield key, value\n\n\ndef token_count_mapper(key, value):\n \"\"\" Purpose:\n Splits lines of each file by whitespace (value holds lines), and emits (token, 1) for each token in the line.\n When combined with a basic count reducer, implements wordcount.\n \n Args:\n key: byte offset (not used in this function)\n value: (string)\n Yields:\n A series of tuples of the 
form (key, 1) \n \"\"\"\n for token in value.split():\n yield token, 1\n\n\n<mask token>\n\n\ndef join_mapper(key, value, cache={}):\n \"\"\"\"table\" refers to all files in one HDFS root directory below:\n \n PURPOSE: \n Very similar to \"select_where_mapper\" except the key output is different. \n Outputs (key_1+key_2,..., value) where key_1, key_2,... are given by the columns specified in key_columns. \n \n When run on tables I_1, I_2 that share keys \"1,2,3\", \n where I_1 has the shared keys in columns A,B,C \n and I_2 has they shared keys in columns D,C,E,\n then run with join_inner_reducer, this implements:\n SELECT I_1.COL_a, I_1.COL_b,.. I_2.COL_a, I_2.COL_b,\n FROM I_1 INNER JOIN I_2 \n ON I_1.key1 = I_2.key1, I_1.key=I_2.key2,...\n WHERE I_1.filter_column_1 (not) in filter_vals_1_for_I_1, I_1.filter_column_2 (not) in filter_vals_2_for_I_1, ...\n and I_2.filter_column_1 (not) in filter_vals_1_for_I_2, I_2.filter_column_2 (not) in filter_vals_2_for_I_2, ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - table_1_path='...' string representing the HDFS path(s) (if multiple, should be a string with commas between the individual paths) of the files of \"table 1\". used to parse out the key columns from this table when table 2 has the same keys but in different columns\n via jobconfs (MANDATORY) - table_2_path='...' 
\" \"\n via jobconfs (MANDATORY) - table_1_key_columns=1,2,3': comma delimited list of ints as a string like \"1,2,3\"\n via jobconfs (MANDATORY) - table_2_key_columns=1,2,3': \" \"\n via jobconfs (MANDATORY) - table_1_delimiter: the delimter the file 1 is split on\n via jobconfs (MANDATORY) - table_2_delimiter: \" \"\n via jobconfs (MANDATORY) - table_1_target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n via jobconfs (MANDATORY) - table_2_target_columns: \" \" \n \n For example and usage of the following 6 parameters, see the docstring of select_where;\n they are the same except duplicated for \"table_1\" and table_2\"\n \n via jobconfs (OPTIONAL) - table_1_filter_columns\n via jobconfs (OPTIONAL) - table_1_filter_vals\n via jobconfs (OPTIONAL) - table_1_invert_filter_vals\n via jobconfs (OPTIONAL) - table_2_filter_columns\n via jobconfs (OPTIONAL) - table_2_filter_vals\n via jobconfs (OPTIONAL) - table_2_invert_filter_vals\n Yields:\n a subset of the (key_1+key_2+..., value) for each input pair\n \"\"\"\n PREFIX = None\n INPUT = os.environ['mapreduce_map_input_file']\n for t1p in os.environ['table_1_path'].split(','):\n if INPUT.startswith(t1p):\n PREFIX = '1'\n break\n if not PREFIX:\n for t2p in os.environ['table_2_path'].split(','):\n if INPUT.startswith(t2p):\n PREFIX = '2'\n break\n if not PREFIX:\n raise Exception(\n 'Bug: File {0} matches neither input path 1 ({1}) or input path 2 ({2})'\n .format(INPUT, os.environ['table_1_path'], os.environ[\n 'table_2_path']))\n TABLE = os.environ['table_{0}_path'.format(PREFIX)]\n if not 'filtering' in cache and 'table_{0}_filter_columns'.format(PREFIX\n ) in os.environ and 'table_{0}_filter_vals'.format(PREFIX\n ) in os.environ and 'table_{0}_invert_filter_vals'.format(PREFIX\n ) in os.environ:\n cache['filtering'] = _filtering_parsing_helper(\n 'table_{0}_filter_columns'.format(PREFIX),\n 'table_{0}_filter_vals'.format(PREFIX),\n 
'table_{0}_invert_filter_vals'.format(PREFIX))\n if not 'key_columns' in cache:\n cache['key_columns'] = [int(x) for x in os.environ[\n 'table_{0}_key_columns'.format(PREFIX)].split(',')]\n if not 'target_columns' in cache:\n if os.environ['table_{0}_target_columns'.format(PREFIX)] == '*':\n cache['target_columns'] = '*'\n else:\n cache['target_columns'] = [int(x) for x in os.environ[\n 'table_{0}_target_columns'.format(PREFIX)].split(',')]\n if not 'delimiter' in cache:\n cache['delimiter'] = os.environ['table_{0}_delimiter'.format(PREFIX)]\n k, v = _kv_helper(cache, value)\n if k and v:\n outdict = {}\n outdict['table'] = TABLE\n outdict['row'] = v\n yield k, outdict\n",
"step-3": "<mask token>\n\n\ndef _filtering_parsing_helper(filter_cols_key, filter_vals_key,\n filter_invert_key):\n filter_vals = os.environ[filter_vals_key].split('|')\n inverts = [int(y) for y in os.environ[filter_invert_key].split('|')]\n filter_dict = {}\n for xindex, x in enumerate([int(y) for y in os.environ[filter_cols_key]\n .split('|')]):\n filter_dict[x] = {}\n filter_dict[x]['filter_vals'] = filter_vals[xindex].split(',')\n filter_dict[x]['invert'] = inverts[xindex]\n return filter_dict\n\n\ndef _filtering_passed_helper(filter_dict, vals):\n yield_row = True\n for filter_col in filter_dict.keys():\n if filter_dict[filter_col]['invert'] and vals[filter_col\n ] in filter_dict[filter_col]['filter_vals'] or not filter_dict[\n filter_col]['invert'] and vals[filter_col] not in filter_dict[\n filter_col]['filter_vals']:\n yield_row = False\n break\n return yield_row\n\n\ndef _kv_helper(cache, value):\n \"\"\"shared code between select_where and select_join\n \n splits vals, see if filtering passes, forms the key from key_columns and forms the values from target_columns\n \"\"\"\n vals = [v.replace('\"', '') for v in value.split(cache['delimiter'])]\n if 'filtering' not in cache or _filtering_passed_helper(cache[\n 'filtering'], vals):\n k = '+'.join(vals) if cache['key_columns'] == '*' else '+'.join(\n vals[l] for l in cache['key_columns'])\n v = ','.join(vals) if cache['target_columns'] == '*' else ','.join([\n vals[l] for l in cache['target_columns']])\n return k, v\n return None, None\n\n\ndef identity_mapper(key, value):\n \"\"\" Does Nothing; used when all work done in reduce phase \n \"\"\"\n yield key, value\n\n\ndef token_count_mapper(key, value):\n \"\"\" Purpose:\n Splits lines of each file by whitespace (value holds lines), and emits (token, 1) for each token in the line.\n When combined with a basic count reducer, implements wordcount.\n \n Args:\n key: byte offset (not used in this function)\n value: (string)\n Yields:\n A series of tuples of the 
form (key, 1) \n \"\"\"\n for token in value.split():\n yield token, 1\n\n\ndef select_where(key, value, cache={}):\n \"\"\"\n PURPOSE: \n When combined with an identiy reducer this implements:\n \n SELECT (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = target_column_1, ..., target_column_N\n FROM (input dataset)\n GROUP BY target_column_1, ..., target_column_N;\n WHERE filter_column_1 (not) in [filter_vals_1] and filter_column_2 (not) in [filter_vals_2] and ...\n \n When combined with a count reducer this implements:\n \n SELECT (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = count(*)\n FROM (input dataset)\n GROUP BY target_column_1, ..., target_column_N;\n WHERE filter_column_1 (not) in [filter_vals_1] and filter_column_2 (not) in [filter_vals_2] and ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n\n via jobconfs (MANDATORY) - key_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\"\n \n via jobconfs (OPTIONAL) - delimiter: the delimter the file is split on\n via jobconfs (OPTIONAL) - filter_columns : pipe delimited list of ints like: 1|2|3. 
This list will be split\n and entry i will be used with filter vals list i\n via jobconfs (OPTIONAL) - filter_vals: is a pipe delimited list of comma delimited list of strings as a string like a,b,c|d,e,f|,..\n this list is split on | and entry i is used as the filter list for filter column i\n via jobconfs (OPTIONAL) - invert_filter_vals: pipe delimited list of boolean integers, e.g., 0|1|0...\n this list is split, and used to trigger \"not in\" instead of in (like WHERE NOT)try i is 1, then values are selected where filter column i is \n NOT in filter_vals list i\n \n All three of these must be passed in or nothing happens (no where clause) \n \n EXAMPLE:\n jobconf = ['filter_columns=1|2, filtervals=a,b|c,d, invert_filter_vals = 0|1\n \n Does a \n SELECT * where column[1] in [\"a\",\"b\"] and column[2] NOT in [\"c,d\"]\n \n Note that you can pass in the same column twice where invert_filter_vals = 0 then 1, e.g.,:\n jobconf = ['filter_columns=1|1, filtervals=a,b|c,d, invert_filter_vals = 0|1\n to get a \"column[1] in [\"a\",\"b\"] but not in [\"c\",\"d\"] effect. 
\n \n Yields:\n (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = target_column_1, ..., target_column_N)\n for the subset of (key, value) inputs matching the where clause\n \"\"\"\n if (not 'filtering' in cache and 'filter_columns' in os.environ and \n 'filter_vals' in os.environ and 'invert_filter_vals' in os.environ):\n cache['filtering'] = _filtering_parsing_helper('filter_columns',\n 'filter_vals', 'invert_filter_vals')\n if not 'delimiter' in cache:\n cache['delimiter'] = os.environ['delimiter']\n if not 'target_columns' in cache:\n if os.environ['target_columns'] == '*':\n cache['target_columns'] = '*'\n else:\n cache['target_columns'] = [int(x) for x in os.environ[\n 'target_columns'].split(',')]\n if not 'key_columns' in cache:\n if os.environ['key_columns'] == '*':\n cache['key_columns'] = '*'\n else:\n cache['key_columns'] = [int(x) for x in os.environ[\n 'key_columns'].split(',')]\n k, v = _kv_helper(cache, value)\n if k and v:\n yield k, v\n\n\ndef join_mapper(key, value, cache={}):\n \"\"\"\"table\" refers to all files in one HDFS root directory below:\n \n PURPOSE: \n Very similar to \"select_where_mapper\" except the key output is different. \n Outputs (key_1+key_2,..., value) where key_1, key_2,... are given by the columns specified in key_columns. \n \n When run on tables I_1, I_2 that share keys \"1,2,3\", \n where I_1 has the shared keys in columns A,B,C \n and I_2 has they shared keys in columns D,C,E,\n then run with join_inner_reducer, this implements:\n SELECT I_1.COL_a, I_1.COL_b,.. 
I_2.COL_a, I_2.COL_b,\n FROM I_1 INNER JOIN I_2 \n ON I_1.key1 = I_2.key1, I_1.key=I_2.key2,...\n WHERE I_1.filter_column_1 (not) in filter_vals_1_for_I_1, I_1.filter_column_2 (not) in filter_vals_2_for_I_1, ...\n and I_2.filter_column_1 (not) in filter_vals_1_for_I_2, I_2.filter_column_2 (not) in filter_vals_2_for_I_2, ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - table_1_path='...' string representing the HDFS path(s) (if multiple, should be a string with commas between the individual paths) of the files of \"table 1\". used to parse out the key columns from this table when table 2 has the same keys but in different columns\n via jobconfs (MANDATORY) - table_2_path='...' \" \"\n via jobconfs (MANDATORY) - table_1_key_columns=1,2,3': comma delimited list of ints as a string like \"1,2,3\"\n via jobconfs (MANDATORY) - table_2_key_columns=1,2,3': \" \"\n via jobconfs (MANDATORY) - table_1_delimiter: the delimter the file 1 is split on\n via jobconfs (MANDATORY) - table_2_delimiter: \" \"\n via jobconfs (MANDATORY) - table_1_target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n via jobconfs (MANDATORY) - table_2_target_columns: \" \" \n \n For example and usage of the following 6 parameters, see the docstring of select_where;\n they are the same except duplicated for \"table_1\" and table_2\"\n \n via jobconfs (OPTIONAL) - table_1_filter_columns\n via jobconfs (OPTIONAL) - table_1_filter_vals\n via jobconfs (OPTIONAL) - table_1_invert_filter_vals\n via jobconfs (OPTIONAL) - table_2_filter_columns\n via jobconfs (OPTIONAL) - table_2_filter_vals\n via jobconfs (OPTIONAL) - table_2_invert_filter_vals\n Yields:\n a subset of the (key_1+key_2+..., value) for each input pair\n \"\"\"\n PREFIX = None\n INPUT = os.environ['mapreduce_map_input_file']\n for t1p in os.environ['table_1_path'].split(','):\n if 
INPUT.startswith(t1p):\n PREFIX = '1'\n break\n if not PREFIX:\n for t2p in os.environ['table_2_path'].split(','):\n if INPUT.startswith(t2p):\n PREFIX = '2'\n break\n if not PREFIX:\n raise Exception(\n 'Bug: File {0} matches neither input path 1 ({1}) or input path 2 ({2})'\n .format(INPUT, os.environ['table_1_path'], os.environ[\n 'table_2_path']))\n TABLE = os.environ['table_{0}_path'.format(PREFIX)]\n if not 'filtering' in cache and 'table_{0}_filter_columns'.format(PREFIX\n ) in os.environ and 'table_{0}_filter_vals'.format(PREFIX\n ) in os.environ and 'table_{0}_invert_filter_vals'.format(PREFIX\n ) in os.environ:\n cache['filtering'] = _filtering_parsing_helper(\n 'table_{0}_filter_columns'.format(PREFIX),\n 'table_{0}_filter_vals'.format(PREFIX),\n 'table_{0}_invert_filter_vals'.format(PREFIX))\n if not 'key_columns' in cache:\n cache['key_columns'] = [int(x) for x in os.environ[\n 'table_{0}_key_columns'.format(PREFIX)].split(',')]\n if not 'target_columns' in cache:\n if os.environ['table_{0}_target_columns'.format(PREFIX)] == '*':\n cache['target_columns'] = '*'\n else:\n cache['target_columns'] = [int(x) for x in os.environ[\n 'table_{0}_target_columns'.format(PREFIX)].split(',')]\n if not 'delimiter' in cache:\n cache['delimiter'] = os.environ['table_{0}_delimiter'.format(PREFIX)]\n k, v = _kv_helper(cache, value)\n if k and v:\n outdict = {}\n outdict['table'] = TABLE\n outdict['row'] = v\n yield k, outdict\n",
"step-4": "<mask token>\nimport os\n<mask token>\n\n\ndef _filtering_parsing_helper(filter_cols_key, filter_vals_key,\n filter_invert_key):\n filter_vals = os.environ[filter_vals_key].split('|')\n inverts = [int(y) for y in os.environ[filter_invert_key].split('|')]\n filter_dict = {}\n for xindex, x in enumerate([int(y) for y in os.environ[filter_cols_key]\n .split('|')]):\n filter_dict[x] = {}\n filter_dict[x]['filter_vals'] = filter_vals[xindex].split(',')\n filter_dict[x]['invert'] = inverts[xindex]\n return filter_dict\n\n\ndef _filtering_passed_helper(filter_dict, vals):\n yield_row = True\n for filter_col in filter_dict.keys():\n if filter_dict[filter_col]['invert'] and vals[filter_col\n ] in filter_dict[filter_col]['filter_vals'] or not filter_dict[\n filter_col]['invert'] and vals[filter_col] not in filter_dict[\n filter_col]['filter_vals']:\n yield_row = False\n break\n return yield_row\n\n\ndef _kv_helper(cache, value):\n \"\"\"shared code between select_where and select_join\n \n splits vals, see if filtering passes, forms the key from key_columns and forms the values from target_columns\n \"\"\"\n vals = [v.replace('\"', '') for v in value.split(cache['delimiter'])]\n if 'filtering' not in cache or _filtering_passed_helper(cache[\n 'filtering'], vals):\n k = '+'.join(vals) if cache['key_columns'] == '*' else '+'.join(\n vals[l] for l in cache['key_columns'])\n v = ','.join(vals) if cache['target_columns'] == '*' else ','.join([\n vals[l] for l in cache['target_columns']])\n return k, v\n return None, None\n\n\ndef identity_mapper(key, value):\n \"\"\" Does Nothing; used when all work done in reduce phase \n \"\"\"\n yield key, value\n\n\ndef token_count_mapper(key, value):\n \"\"\" Purpose:\n Splits lines of each file by whitespace (value holds lines), and emits (token, 1) for each token in the line.\n When combined with a basic count reducer, implements wordcount.\n \n Args:\n key: byte offset (not used in this function)\n value: (string)\n Yields:\n A 
series of tuples of the form (key, 1) \n \"\"\"\n for token in value.split():\n yield token, 1\n\n\ndef select_where(key, value, cache={}):\n \"\"\"\n PURPOSE: \n When combined with an identiy reducer this implements:\n \n SELECT (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = target_column_1, ..., target_column_N\n FROM (input dataset)\n GROUP BY target_column_1, ..., target_column_N;\n WHERE filter_column_1 (not) in [filter_vals_1] and filter_column_2 (not) in [filter_vals_2] and ...\n \n When combined with a count reducer this implements:\n \n SELECT (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = count(*)\n FROM (input dataset)\n GROUP BY target_column_1, ..., target_column_N;\n WHERE filter_column_1 (not) in [filter_vals_1] and filter_column_2 (not) in [filter_vals_2] and ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n\n via jobconfs (MANDATORY) - key_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\"\n \n via jobconfs (OPTIONAL) - delimiter: the delimter the file is split on\n via jobconfs (OPTIONAL) - filter_columns : pipe delimited list of ints like: 1|2|3. 
This list will be split\n and entry i will be used with filter vals list i\n via jobconfs (OPTIONAL) - filter_vals: is a pipe delimited list of comma delimited list of strings as a string like a,b,c|d,e,f|,..\n this list is split on | and entry i is used as the filter list for filter column i\n via jobconfs (OPTIONAL) - invert_filter_vals: pipe delimited list of boolean integers, e.g., 0|1|0...\n this list is split, and used to trigger \"not in\" instead of in (like WHERE NOT)try i is 1, then values are selected where filter column i is \n NOT in filter_vals list i\n \n All three of these must be passed in or nothing happens (no where clause) \n \n EXAMPLE:\n jobconf = ['filter_columns=1|2, filtervals=a,b|c,d, invert_filter_vals = 0|1\n \n Does a \n SELECT * where column[1] in [\"a\",\"b\"] and column[2] NOT in [\"c,d\"]\n \n Note that you can pass in the same column twice where invert_filter_vals = 0 then 1, e.g.,:\n jobconf = ['filter_columns=1|1, filtervals=a,b|c,d, invert_filter_vals = 0|1\n to get a \"column[1] in [\"a\",\"b\"] but not in [\"c\",\"d\"] effect. 
\n \n Yields:\n (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = target_column_1, ..., target_column_N)\n for the subset of (key, value) inputs matching the where clause\n \"\"\"\n if (not 'filtering' in cache and 'filter_columns' in os.environ and \n 'filter_vals' in os.environ and 'invert_filter_vals' in os.environ):\n cache['filtering'] = _filtering_parsing_helper('filter_columns',\n 'filter_vals', 'invert_filter_vals')\n if not 'delimiter' in cache:\n cache['delimiter'] = os.environ['delimiter']\n if not 'target_columns' in cache:\n if os.environ['target_columns'] == '*':\n cache['target_columns'] = '*'\n else:\n cache['target_columns'] = [int(x) for x in os.environ[\n 'target_columns'].split(',')]\n if not 'key_columns' in cache:\n if os.environ['key_columns'] == '*':\n cache['key_columns'] = '*'\n else:\n cache['key_columns'] = [int(x) for x in os.environ[\n 'key_columns'].split(',')]\n k, v = _kv_helper(cache, value)\n if k and v:\n yield k, v\n\n\ndef join_mapper(key, value, cache={}):\n \"\"\"\"table\" refers to all files in one HDFS root directory below:\n \n PURPOSE: \n Very similar to \"select_where_mapper\" except the key output is different. \n Outputs (key_1+key_2,..., value) where key_1, key_2,... are given by the columns specified in key_columns. \n \n When run on tables I_1, I_2 that share keys \"1,2,3\", \n where I_1 has the shared keys in columns A,B,C \n and I_2 has they shared keys in columns D,C,E,\n then run with join_inner_reducer, this implements:\n SELECT I_1.COL_a, I_1.COL_b,.. 
I_2.COL_a, I_2.COL_b,\n FROM I_1 INNER JOIN I_2 \n ON I_1.key1 = I_2.key1, I_1.key=I_2.key2,...\n WHERE I_1.filter_column_1 (not) in filter_vals_1_for_I_1, I_1.filter_column_2 (not) in filter_vals_2_for_I_1, ...\n and I_2.filter_column_1 (not) in filter_vals_1_for_I_2, I_2.filter_column_2 (not) in filter_vals_2_for_I_2, ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - table_1_path='...' string representing the HDFS path(s) (if multiple, should be a string with commas between the individual paths) of the files of \"table 1\". used to parse out the key columns from this table when table 2 has the same keys but in different columns\n via jobconfs (MANDATORY) - table_2_path='...' \" \"\n via jobconfs (MANDATORY) - table_1_key_columns=1,2,3': comma delimited list of ints as a string like \"1,2,3\"\n via jobconfs (MANDATORY) - table_2_key_columns=1,2,3': \" \"\n via jobconfs (MANDATORY) - table_1_delimiter: the delimter the file 1 is split on\n via jobconfs (MANDATORY) - table_2_delimiter: \" \"\n via jobconfs (MANDATORY) - table_1_target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n via jobconfs (MANDATORY) - table_2_target_columns: \" \" \n \n For example and usage of the following 6 parameters, see the docstring of select_where;\n they are the same except duplicated for \"table_1\" and table_2\"\n \n via jobconfs (OPTIONAL) - table_1_filter_columns\n via jobconfs (OPTIONAL) - table_1_filter_vals\n via jobconfs (OPTIONAL) - table_1_invert_filter_vals\n via jobconfs (OPTIONAL) - table_2_filter_columns\n via jobconfs (OPTIONAL) - table_2_filter_vals\n via jobconfs (OPTIONAL) - table_2_invert_filter_vals\n Yields:\n a subset of the (key_1+key_2+..., value) for each input pair\n \"\"\"\n PREFIX = None\n INPUT = os.environ['mapreduce_map_input_file']\n for t1p in os.environ['table_1_path'].split(','):\n if 
INPUT.startswith(t1p):\n PREFIX = '1'\n break\n if not PREFIX:\n for t2p in os.environ['table_2_path'].split(','):\n if INPUT.startswith(t2p):\n PREFIX = '2'\n break\n if not PREFIX:\n raise Exception(\n 'Bug: File {0} matches neither input path 1 ({1}) or input path 2 ({2})'\n .format(INPUT, os.environ['table_1_path'], os.environ[\n 'table_2_path']))\n TABLE = os.environ['table_{0}_path'.format(PREFIX)]\n if not 'filtering' in cache and 'table_{0}_filter_columns'.format(PREFIX\n ) in os.environ and 'table_{0}_filter_vals'.format(PREFIX\n ) in os.environ and 'table_{0}_invert_filter_vals'.format(PREFIX\n ) in os.environ:\n cache['filtering'] = _filtering_parsing_helper(\n 'table_{0}_filter_columns'.format(PREFIX),\n 'table_{0}_filter_vals'.format(PREFIX),\n 'table_{0}_invert_filter_vals'.format(PREFIX))\n if not 'key_columns' in cache:\n cache['key_columns'] = [int(x) for x in os.environ[\n 'table_{0}_key_columns'.format(PREFIX)].split(',')]\n if not 'target_columns' in cache:\n if os.environ['table_{0}_target_columns'.format(PREFIX)] == '*':\n cache['target_columns'] = '*'\n else:\n cache['target_columns'] = [int(x) for x in os.environ[\n 'table_{0}_target_columns'.format(PREFIX)].split(',')]\n if not 'delimiter' in cache:\n cache['delimiter'] = os.environ['table_{0}_delimiter'.format(PREFIX)]\n k, v = _kv_helper(cache, value)\n if k and v:\n outdict = {}\n outdict['table'] = TABLE\n outdict['row'] = v\n yield k, outdict\n",
"step-5": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015 Tommy Carpenter\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport os\n\n\"\"\"\nWarning; here be dragons. Documentation needed. 
\n\nNo try excepts here unless the MR job can complete without them!\nFail fast and have the exception stack show up in the Hadoop interface logs\n\"\"\"\n\ndef _filtering_parsing_helper(filter_cols_key, filter_vals_key, filter_invert_key):\n filter_vals = os.environ[filter_vals_key].split(\"|\")\n inverts = [int(y) for y in os.environ[filter_invert_key].split(\"|\")]\n filter_dict = {}\n for xindex, x in enumerate([int(y) for y in os.environ[filter_cols_key].split(\"|\")]):\n filter_dict[x] = {} \n filter_dict[x][\"filter_vals\"] = filter_vals[xindex].split(\",\")\n filter_dict[x][\"invert\"] = inverts[xindex]\n return filter_dict\n \ndef _filtering_passed_helper(filter_dict, vals):\n yield_row = True\n for filter_col in filter_dict.keys():\n if (filter_dict[filter_col][\"invert\"] and vals[filter_col] in filter_dict[filter_col][\"filter_vals\"]) or (not filter_dict[filter_col][\"invert\"] and vals[filter_col] not in filter_dict[filter_col][\"filter_vals\"]):\n yield_row = False\n break \n return yield_row\n \ndef _kv_helper(cache, value):\n \"\"\"shared code between select_where and select_join\n \n splits vals, see if filtering passes, forms the key from key_columns and forms the values from target_columns\n \"\"\"\n vals = [v.replace('\"','') for v in value.split(cache[\"delimiter\"])]\n if \"filtering\" not in cache or _filtering_passed_helper(cache[\"filtering\"], vals): #yield if filtering criteria met or no filtering criteria \n k = \"+\".join(vals) if cache[\"key_columns\"] == \"*\" else \"+\".join(vals[l] for l in cache[\"key_columns\"]) \n v = \",\".join(vals) if cache[\"target_columns\"] == \"*\" else \",\".join([vals[l] for l in cache[\"target_columns\"]])\n return k, v\n return None, None\n \ndef identity_mapper(key, value):\n \"\"\" Does Nothing; used when all work done in reduce phase \n \"\"\"\n yield key, value\n \ndef token_count_mapper(key, value):\n \"\"\" Purpose:\n Splits lines of each file by whitespace (value holds lines), and emits (token, 
1) for each token in the line.\n When combined with a basic count reducer, implements wordcount.\n \n Args:\n key: byte offset (not used in this function)\n value: (string)\n Yields:\n A series of tuples of the form (key, 1) \n \"\"\"\n for token in value.split():\n yield token, 1\n\ndef select_where(key, value, cache={}):\n \"\"\"\n PURPOSE: \n When combined with an identiy reducer this implements:\n \n SELECT (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = target_column_1, ..., target_column_N\n FROM (input dataset)\n GROUP BY target_column_1, ..., target_column_N;\n WHERE filter_column_1 (not) in [filter_vals_1] and filter_column_2 (not) in [filter_vals_2] and ...\n \n When combined with a count reducer this implements:\n \n SELECT (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = count(*)\n FROM (input dataset)\n GROUP BY target_column_1, ..., target_column_N;\n WHERE filter_column_1 (not) in [filter_vals_1] and filter_column_2 (not) in [filter_vals_2] and ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n\n via jobconfs (MANDATORY) - key_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\"\n \n via jobconfs (OPTIONAL) - delimiter: the delimter the file is split on\n via jobconfs (OPTIONAL) - filter_columns : pipe delimited list of ints like: 1|2|3. 
This list will be split\n and entry i will be used with filter vals list i\n via jobconfs (OPTIONAL) - filter_vals: is a pipe delimited list of comma delimited list of strings as a string like a,b,c|d,e,f|,..\n this list is split on | and entry i is used as the filter list for filter column i\n via jobconfs (OPTIONAL) - invert_filter_vals: pipe delimited list of boolean integers, e.g., 0|1|0...\n this list is split, and used to trigger \"not in\" instead of in (like WHERE NOT)try i is 1, then values are selected where filter column i is \n NOT in filter_vals list i\n \n All three of these must be passed in or nothing happens (no where clause) \n \n EXAMPLE:\n jobconf = ['filter_columns=1|2, filtervals=a,b|c,d, invert_filter_vals = 0|1\n \n Does a \n SELECT * where column[1] in [\"a\",\"b\"] and column[2] NOT in [\"c,d\"]\n \n Note that you can pass in the same column twice where invert_filter_vals = 0 then 1, e.g.,:\n jobconf = ['filter_columns=1|1, filtervals=a,b|c,d, invert_filter_vals = 0|1\n to get a \"column[1] in [\"a\",\"b\"] but not in [\"c\",\"d\"] effect. 
\n \n Yields:\n (k, v)\n where k = target_column_1+target_column_2+...,+target_column_N,\n where v = target_column_1, ..., target_column_N)\n for the subset of (key, value) inputs matching the where clause\n \"\"\"\n if not \"filtering\" in cache and \"filter_columns\" in os.environ and \"filter_vals\" in os.environ and \"invert_filter_vals\" in os.environ:\n cache[\"filtering\"] = _filtering_parsing_helper(\"filter_columns\", \"filter_vals\", \"invert_filter_vals\")\n \n if not \"delimiter\" in cache:\n cache[\"delimiter\"] = os.environ[\"delimiter\"]\n\n if not \"target_columns\" in cache:\n if os.environ[\"target_columns\"] == \"*\":\n cache[\"target_columns\"] = \"*\" \n else:\n cache[\"target_columns\"] = [int(x) for x in os.environ[\"target_columns\"].split(\",\")] #list\n\n if not \"key_columns\" in cache:\n if os.environ[\"key_columns\"] == \"*\":\n cache[\"key_columns\"] = \"*\" \n else:\n cache[\"key_columns\"] = [int(x) for x in os.environ[\"key_columns\"].split(\",\")] #list\n \n k, v = _kv_helper(cache, value)\n if k and v:\n yield k,v \n \n\ndef join_mapper(key, value, cache={}):\n \"\"\"\"table\" refers to all files in one HDFS root directory below:\n \n PURPOSE: \n Very similar to \"select_where_mapper\" except the key output is different. \n Outputs (key_1+key_2,..., value) where key_1, key_2,... are given by the columns specified in key_columns. \n \n When run on tables I_1, I_2 that share keys \"1,2,3\", \n where I_1 has the shared keys in columns A,B,C \n and I_2 has they shared keys in columns D,C,E,\n then run with join_inner_reducer, this implements:\n SELECT I_1.COL_a, I_1.COL_b,.. 
I_2.COL_a, I_2.COL_b,\n FROM I_1 INNER JOIN I_2 \n ON I_1.key1 = I_2.key1, I_1.key=I_2.key2,...\n WHERE I_1.filter_column_1 (not) in filter_vals_1_for_I_1, I_1.filter_column_2 (not) in filter_vals_2_for_I_1, ...\n and I_2.filter_column_1 (not) in filter_vals_1_for_I_2, I_2.filter_column_2 (not) in filter_vals_2_for_I_2, ...\n \n Args:\n key: byte offset (not used in this function; returned as is)\n value: (string)\n via jobconfs (MANDATORY) - table_1_path='...' string representing the HDFS path(s) (if multiple, should be a string with commas between the individual paths) of the files of \"table 1\". used to parse out the key columns from this table when table 2 has the same keys but in different columns\n via jobconfs (MANDATORY) - table_2_path='...' \" \"\n via jobconfs (MANDATORY) - table_1_key_columns=1,2,3': comma delimited list of ints as a string like \"1,2,3\"\n via jobconfs (MANDATORY) - table_2_key_columns=1,2,3': \" \"\n via jobconfs (MANDATORY) - table_1_delimiter: the delimter the file 1 is split on\n via jobconfs (MANDATORY) - table_2_delimiter: \" \"\n via jobconfs (MANDATORY) - table_1_target_columns: can be \n 1) \"*\" : all columns selected as return value\n 2) comma delimited list of ints as a string like \"1,2,3\" \n via jobconfs (MANDATORY) - table_2_target_columns: \" \" \n \n For example and usage of the following 6 parameters, see the docstring of select_where;\n they are the same except duplicated for \"table_1\" and table_2\"\n \n via jobconfs (OPTIONAL) - table_1_filter_columns\n via jobconfs (OPTIONAL) - table_1_filter_vals\n via jobconfs (OPTIONAL) - table_1_invert_filter_vals\n via jobconfs (OPTIONAL) - table_2_filter_columns\n via jobconfs (OPTIONAL) - table_2_filter_vals\n via jobconfs (OPTIONAL) - table_2_invert_filter_vals\n Yields:\n a subset of the (key_1+key_2+..., value) for each input pair\n \"\"\"\n PREFIX = None\n INPUT = os.environ[\"mapreduce_map_input_file\"]\n \n #Determine what table this row is a part of. 
\n #To resolve the known issue listed in the readme about paths not containing asterisks, this needs \n #to be updated to include some fancy regex logic\n for t1p in os.environ[\"table_1_path\"].split(\",\"):\n if INPUT.startswith(t1p):\n PREFIX = \"1\"\n break\n if not PREFIX:\n for t2p in os.environ[\"table_2_path\"].split(\",\"):\n if INPUT.startswith(t2p):\n PREFIX = \"2\"\n break\n if not PREFIX:\n raise Exception(\"Bug: File {0} matches neither input path 1 ({1}) or input path 2 ({2})\".format(INPUT, os.environ[\"table_1_path\"], os.environ[\"table_2_path\"]))\n \n TABLE = os.environ[\"table_{0}_path\".format(PREFIX)]\n \n if not \"filtering\" in cache and \"table_{0}_filter_columns\".format(PREFIX) in os.environ and \"table_{0}_filter_vals\".format(PREFIX) in os.environ and \"table_{0}_invert_filter_vals\".format(PREFIX) in os.environ:\n cache[\"filtering\"] = _filtering_parsing_helper(\"table_{0}_filter_columns\".format(PREFIX), \"table_{0}_filter_vals\".format(PREFIX), \"table_{0}_invert_filter_vals\".format(PREFIX)) \n \n if not \"key_columns\" in cache:\n cache[\"key_columns\"] = [int(x) for x in os.environ[\"table_{0}_key_columns\".format(PREFIX)].split(\",\")] #list\n\n if not \"target_columns\" in cache:\n if os.environ[\"table_{0}_target_columns\".format(PREFIX)] == \"*\":\n cache[\"target_columns\"] = \"*\" \n else:\n cache[\"target_columns\"] = [int(x) for x in os.environ[\"table_{0}_target_columns\".format(PREFIX)].split(\",\")] #list\n \n if not \"delimiter\" in cache:\n cache[\"delimiter\"] = os.environ[\"table_{0}_delimiter\".format(PREFIX)]\n\n k, v = _kv_helper(cache, value)\n if k and v:\n outdict = {}\n outdict[\"table\"] = TABLE \n outdict[\"row\"] = v\n yield k, outdict\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('core', '0052_encounter_note')]
operations = [migrations.CreateModel(name='FormPrintingCount', fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('form_name', models.
CharField(max_length=255, verbose_name='ชื่อไฟล์ jasper')), ('key',
models.CharField(help_text=
'สิ่งที่ใช้ระบุเอกสารนั้นๆ เช่น pk, วันที่', max_length=255)), (
'count', models.PositiveIntegerField(default=0, verbose_name=
'จำนวนครั้งที่พิมพ์'))]), migrations.AlterUniqueTogether(name=
'formprintingcount', unique_together=set([('form_name', 'key')]))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``FormPrintingCount`` model.

    Per the field metadata: ``form_name`` is a jasper report file name,
    ``key`` identifies the printed document (e.g. a pk or a date), and
    ``count`` holds the number of times it was printed.  The unique
    constraint keeps one counter row per (form_name, key) pair.
    """

    # Ordered after the prior core migration that added encounter.note.
    dependencies = [('core', '0052_encounter_note')]
    operations = [migrations.CreateModel(name='FormPrintingCount', fields=[
        ('id', models.AutoField(auto_created=True, primary_key=True,
        serialize=False, verbose_name='ID')), ('form_name', models.
        CharField(max_length=255, verbose_name='ชื่อไฟล์ jasper')), ('key',
        models.CharField(help_text=
        'สิ่งที่ใช้ระบุเอกสารนั้นๆ เช่น pk, วันที่', max_length=255)), (
        'count', models.PositiveIntegerField(default=0, verbose_name=
        'จำนวนครั้งที่พิมพ์'))]), migrations.AlterUniqueTogether(name=
        'formprintingcount', unique_together=set([('form_name', 'key')]))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-20 11:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create ``FormPrintingCount``: a per-document print counter.

    Per the field metadata: ``form_name`` is a jasper report file name,
    ``key`` identifies the printed document (e.g. a pk or a date), and
    ``count`` holds the number of times it was printed.
    """

    dependencies = [
        ('core', '0052_encounter_note'),
    ]

    operations = [
        migrations.CreateModel(
            name='FormPrintingCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('form_name', models.CharField(max_length=255, verbose_name='ชื่อไฟล์ jasper')),
                ('key', models.CharField(help_text='สิ่งที่ใช้ระบุเอกสารนั้นๆ เช่น pk, วันที่', max_length=255)),
                ('count', models.PositiveIntegerField(default=0, verbose_name='จำนวนครั้งที่พิมพ์')),
            ],
        ),
        # One counter row per (form_name, key) pair.
        migrations.AlterUniqueTogether(
            name='formprintingcount',
            unique_together=set([('form_name', 'key')]),
        ),
    ]
|
flexible
|
{
"blob_id": "05851df7ae64d792e0c1faf96e2aca5b40e86d53",
"index": 2744,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0052_encounter_note')]\n operations = [migrations.CreateModel(name='FormPrintingCount', fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('form_name', models.\n CharField(max_length=255, verbose_name='ชื่อไฟล์ jasper')), ('key',\n models.CharField(help_text=\n 'สิ่งที่ใช้ระบุเอกสารนั้นๆ เช่น pk, วันที่', max_length=255)), (\n 'count', models.PositiveIntegerField(default=0, verbose_name=\n 'จำนวนครั้งที่พิมพ์'))]), migrations.AlterUniqueTogether(name=\n 'formprintingcount', unique_together=set([('form_name', 'key')]))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0052_encounter_note')]\n operations = [migrations.CreateModel(name='FormPrintingCount', fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('form_name', models.\n CharField(max_length=255, verbose_name='ชื่อไฟล์ jasper')), ('key',\n models.CharField(help_text=\n 'สิ่งที่ใช้ระบุเอกสารนั้นๆ เช่น pk, วันที่', max_length=255)), (\n 'count', models.PositiveIntegerField(default=0, verbose_name=\n 'จำนวนครั้งที่พิมพ์'))]), migrations.AlterUniqueTogether(name=\n 'formprintingcount', unique_together=set([('form_name', 'key')]))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.4 on 2017-10-20 11:05\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0052_encounter_note'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FormPrintingCount',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('form_name', models.CharField(max_length=255, verbose_name='ชื่อไฟล์ jasper')),\n ('key', models.CharField(help_text='สิ่งที่ใช้ระบุเอกสารนั้นๆ เช่น pk, วันที่', max_length=255)),\n ('count', models.PositiveIntegerField(default=0, verbose_name='จำนวนครั้งที่พิมพ์')),\n ],\n ),\n migrations.AlterUniqueTogether(\n name='formprintingcount',\n unique_together=set([('form_name', 'key')]),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np

# Load the scanned page and display each of its four quadrants in its own
# window: image1 = top-left, image2 = bottom-left, image3 = top-right,
# image4 = bottom-right (same order as the original hand-written calls).
img = cv2.imread('Scan1.jpg')
rows, cols, _channels = img.shape
mid_row, mid_col = int(rows / 2), int(cols / 2)

# (row_start, row_end, col_start, col_end) for each quadrant, in display order.
quadrants = [
    (0, mid_row, 0, mid_col),
    (mid_row, rows, 0, mid_col),
    (0, mid_row, mid_col, cols),
    (mid_row, rows, mid_col, cols),
]
for idx, (r0, r1, c0, c1) in enumerate(quadrants, start=1):
    cv2.imshow('image%d' % idx, img[r0:r1, c0:c1])

# Block until a key press, then close every window.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "8c6f890631e9696a7907975b5d0bb71d03b380da",
"index": 839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])\ncv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])\ncv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])\ncv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)\n :img_width])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg = cv2.imread('Scan1.jpg')\nimg_height, img_width, dim = img.shape\ncv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])\ncv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])\ncv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])\ncv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)\n :img_width])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimg = cv2.imread('Scan1.jpg')\nimg_height, img_width, dim = img.shape\ncv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])\ncv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])\ncv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])\ncv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)\n :img_width])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import random
import itertools
doc = """
Public good game section (Rounds and feedback).
"""
class Constants(BaseConstants):
    """App-wide constants for the public-goods game."""
    name_in_url = 'public_goods'
    players_per_group = 2
    num_rounds = 2
    # Template included by result pages.
    results_template = 'public_goods/Results_c.html'
    """Amount allocated to each player"""
    # Upper bound on a single player's per-round savings decision.
    max_savings = c(5)
    # NOTE(review): multiplier is not referenced by the payoff logic in this
    # module — confirm whether it is used elsewhere or is dead configuration.
    multiplier = 1
class Subsession(BaseSubsession):
    """One round of the public-goods game.

    Handles per-round setup (``creating_session``) and the aggregate
    savings summary shown on the oTree admin report.
    """

    def vars_for_admin_report(self):
        """Return avg/min/max savings across all players for the admin report.

        Players without a recorded decision (``savings`` is None) are
        excluded; if nobody has decided yet, placeholder strings are
        returned instead of numbers.
        """
        # Idiom fix: compare against None with `is not`, not `!=`.
        savings_session = [p.savings for p in self.get_players()
                           if p.savings is not None]
        if savings_session:
            return {
                'avg_saving': sum(savings_session) / len(savings_session),
                'min_saving': min(savings_session),
                'max_saving': max(savings_session),
            }
        return {
            'avg_saving': '(no data)',
            'min_saving': '(no data)',
            'max_saving': '(no data)',
        }

    def creating_session(self):
        """Seed groups and players from the session config.

        Every group receives the community savings goal.  In round 1 each
        participant's starting endowment is stored in ``participant.vars``
        so later rounds can carry the remaining balance forward.
        """
        endowment = c(self.session.config['endowment'])
        for g in self.get_groups():
            g.com_goal = self.session.config['community_goal_decimal']
        if self.round_number == 1:
            for g in self.get_groups():
                for p in g.get_players():
                    p.participant.vars['endowment'] = endowment
                    p.endowment = p.participant.vars['endowment']
class Group(BaseGroup):
    """A pair of players sharing one community savings goal."""

    # Community goal as a fraction in [0, 1]; 0 disables the goal mechanism.
    com_goal = models.FloatField(min=0, max=1)
    total_savings = models.CurrencyField(initial=0)
    average_savings = models.CurrencyField()
    # total_savings / (n_players * endowment): share of the maximum possible pool.
    individual_savings_share = models.FloatField()
    min_round = models.IntegerField(initial=1, doc="go back to x last round. E.g. 1 for last round")

    def set_payoffs(self):
        """Aggregate the group's savings and assign per-player payoff fields.

        Side effects per player: deducts this round's savings from
        ``participant.vars['endowment']`` (the balance carried across
        rounds), sets ``financial_reward`` and ``endowment``, and
        backfills ``last_savings`` from ``min_round`` rounds ago.

        NOTE(review): when com_goal > 0 but the goal is missed, no branch
        runs — endowment is not deducted and financial_reward is left
        unset for that round. Confirm this is intended.
        """
        people_in_treatment = self.get_players()
        people_in_treatment_num = len(people_in_treatment)
        self.total_savings = sum([p.savings for p in people_in_treatment])
        self.individual_savings_share = self.total_savings / (people_in_treatment_num * self.session.config['endowment'])
        self.average_savings = self.total_savings / people_in_treatment_num
        if self.com_goal > 0:
            if self.individual_savings_share >= self.com_goal:
                # Goal met: everyone gets their remaining endowment plus an
                # equal share of the pooled savings.
                for p in people_in_treatment:
                    p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings
                    # NOTE(review): the pool is split by the fixed
                    # Constants.players_per_group, not the actual group
                    # size — equivalent only while groups are always full.
                    p.financial_reward = (p.participant.vars['endowment']).to_real_world_currency(self.session) + (self.total_savings / Constants.players_per_group).to_real_world_currency(self.session)
                    p.endowment = p.participant.vars['endowment']
                    if self.round_number > self.min_round:
                        p.last_savings = p.in_round(self.round_number - self.min_round).savings
        else:
            # No community goal: reward is just the remaining endowment.
            for p in self.get_players():
                p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings
                p.financial_reward = p.participant.vars['endowment'].to_real_world_currency(self.session)
                p.endowment = p.participant.vars['endowment']
                if self.round_number > self.min_round:
                    p.last_savings = p.in_round(self.round_number - self.min_round).savings

    # NOTE(review): a commented-out per-treatment ('control'/'D'/'DTI')
    # variant of set_payoffs previously sat here; removed as dead code —
    # recover it from version control if the treatment split is revived.
class Player(BasePlayer):
    """oTree player model for the public-goods rounds."""

    # Treatment arm this player was assigned to.
    treatment = models.CharField(doc="Treatment of each player")
    # Remaining endowment after savings have been deducted each round
    # (mirrors participant.vars['endowment'], set in Group.set_payoffs).
    endowment = models.CurrencyField(
        min=0,
        doc="endowment by each player"
    )
    # BUG FIX: the original file contained a dangling `peers =` assignment,
    # which is a SyntaxError and made the whole module unimportable. The
    # intended field definition is unknown, so it is omitted here.
    # TODO(review): restore the `peers` field once its intended type is known.
    # Amount saved this round, limited to the three allowed choices.
    savings = models.CurrencyField(min=0, max=Constants.max_savings, doc="Savings by each player", choices=[c(0), c(2), c(4)])
    # Real-world payoff computed in Group.set_payoffs.
    financial_reward = models.FloatField(min=0)
    # Savings from the comparison round; stays 0 until round_number > min_round.
    last_savings = models.CurrencyField(initial=0)
|
normal
|
{
"blob_id": "e766bba4dec0d37858f1f24083c238763d694109",
"index": 7874,
"step-1": "from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\nimport random\nimport itertools\n\ndoc = \"\"\"\n Public good game section (Rounds and feedback).\n \"\"\"\n\nclass Constants(BaseConstants):\n name_in_url = 'public_goods'\n players_per_group = 2\n num_rounds = 2\n\n results_template = 'public_goods/Results_c.html'\n\n \"\"\"Amount allocated to each player\"\"\"\n max_savings = c(5)\n multiplier = 1\n\n\nclass Subsession(BaseSubsession):\n\n def vars_for_admin_report(self):\n savings_session = [p.savings for p in self.get_players() if p.savings != None]\n if savings_session:\n return {\n 'avg_saving': sum(savings_session)/len(savings_session),\n 'min_saving': min(savings_session),\n 'max_saving': max(savings_session),\n }\n else:\n return {\n 'avg_saving': '(no data)',\n 'min_saving': '(no data)',\n 'max_saving': '(no data)',\n }\n\n def creating_session(self):\n # self.Constants.endowment = self.session.config['endowment']\n # treatments = itertools.cycle(['control', 't1', 't2','t3'])\n endowment = c(self.session.config['endowment'])\n for g in self.get_groups():\n g.com_goal = self.session.config['community_goal_decimal']\n if self.round_number == 1:\n for g in self.get_groups():\n # treatment = next(treatments)\n\n for p in g.get_players():\n # p.participant.vars['treat'] = treatment\n # p.treat = p.participant.vars['treat']\n p.participant.vars['endowment'] = endowment\n p.endowment = p.participant.vars['endowment']\n\n # if self.round_number > 1:\n # for p in self.get_players():\n # p.treat = p.participant.vars['treat']\n\n\nclass Group(BaseGroup):\n com_goal = models.FloatField(min=0, max=1)\n total_savings = models.CurrencyField(initial=0)\n average_savings = models.CurrencyField()\n individual_savings_share = models.FloatField()\n min_round = models.IntegerField(initial=1, doc=\"go back to x last round. E.g. 
1 for last round\")\n\n def set_payoffs(self):\n people_in_treatment = self.get_players()\n people_in_treatment_num = len(people_in_treatment)\n self.total_savings = sum([p.savings for p in people_in_treatment])\n self.individual_savings_share = self.total_savings / (people_in_treatment_num * self.session.config['endowment'])\n self.average_savings = self.total_savings / people_in_treatment_num\n\n if self.com_goal > 0:\n if self.individual_savings_share >= self.com_goal:\n for p in people_in_treatment:\n p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings\n p.financial_reward = (p.participant.vars['endowment']).to_real_world_currency(self.session) + (self.total_savings / Constants.players_per_group).to_real_world_currency(self.session)\n p.endowment = p.participant.vars['endowment']\n if self.round_number > self.min_round:\n p.last_savings = p.in_round(self.round_number - self.min_round).savings\n else:\n for p in self.get_players():\n p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings\n p.financial_reward = p.participant.vars['endowment'].to_real_world_currency(self.session)\n p.endowment = p.participant.vars['endowment']\n if self.round_number > self.min_round:\n p.last_savings = p.in_round(self.round_number - self.min_round).savings\n#\n #def set_payoffs(self):\n # for treatment_name in ['control', 'D', 'DTI']:\n # people_in_treatment = self.get_players_by_treatment(treatment_name)\n # people_in_treatment_num = len(people_in_treatment)\n # total_savings = sum([p.savings for p in people_in_treatment])\n # individual_savings_share = total_savings / (people_in_treatment_num * self.session.config['endowment'])\n # average_savings = total_savings / people_in_treatment_num\n#\n # if self.com_goal > 0:\n # if individual_savings_share >= self.com_goal:\n # for p in people_in_treatment:\n # p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings\n # p.financial_reward = 
p.participant.vars['endowment'].to_real_world_currency(self.session) + (self.total_savings / Constants.players_per_group).to_real_world_currency(self.session)\n # p.endowment = p.participant.vars['endowment']\n # if self.round_number > self.min_round:\n # p.last_savings = p.in_round(self.round_number - self.min_round).savings\n # else:\n # for p in self.get_players_by_treatment(treatment_name):\n # p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings\n # p.financial_reward = p.participant.vars['endowment'].to_real_world_currency(self.session)\n # p.endowment = p.participant.vars['endowment']\n # if self.round_number > self.min_round:\n # p.last_savings = p.in_round(self.round_number - self.min_round).savings\n#\n\n\nclass Player(BasePlayer):\n treatment = models.CharField(doc=\"Treatment of each player\")\n endowment = models.CurrencyField(\n min=0,\n doc=\"endowment by each player\"\n )\n peers =\n savings = models.CurrencyField(min=0, max=Constants.max_savings, doc=\"Savings by each player\",choices=[c(0), c(2), c(4)])\n financial_reward = models.FloatField(min=0)\n last_savings = models.CurrencyField(initial=0)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from Graph import create_random_graph
def find_accessible_vertices_backwards(graph, end_vertex):
    """Breadth-first search backwards from *end_vertex* along inbound edges.

    Returns a dict mapping every vertex that can reach *end_vertex* to the
    next vertex on a shortest path towards *end_vertex* (the BFS parent).

    Raises:
        ValueError: if *end_vertex* is not a vertex of *graph*.
    """
    from collections import deque

    if end_vertex not in graph.parse_vertices():
        raise ValueError("The end vertex is not in the graph.")
    # A set gives O(1) membership tests; the original list scan was O(n)
    # per test, making the whole BFS quadratic.
    visited = {end_vertex}
    # deque.popleft() is O(1); the original `queue = queue[1:]` copied the
    # remaining queue on every single dequeue.
    queue = deque([end_vertex])
    next_vertex = {}
    distance_to_end = {end_vertex: 0}
    while queue:
        current = queue.popleft()
        for edge in graph.parse_inbound_edges(current):
            source = edge.source_id
            if source not in visited:
                visited.add(source)
                queue.append(source)
                distance_to_end[source] = distance_to_end[current] + 1
                next_vertex[source] = current
    return next_vertex
def find_minimum_length_path(graph, start_vertex, end_vertex):
    """Return a shortest path from *start_vertex* to *end_vertex* as a list
    of vertices, walking the successor map built by the backwards BFS.

    Raises:
        ValueError: when *end_vertex* is unreachable from *start_vertex*.
    """
    successors = find_accessible_vertices_backwards(graph, end_vertex)
    if start_vertex not in successors:
        raise ValueError("There is no path from " + str(start_vertex) + " to " + str(end_vertex))
    # Follow the successor chain until the destination appears at the tail.
    path = [start_vertex]
    while path[-1] != end_vertex:
        path.append(successors[path[-1]])
    return path
def main():
    """Demo driver: build a random graph, print its edges, then show the
    backwards-BFS successor map for vertex 1 and a shortest 1 -> 4 path."""
    demo_graph = create_random_graph(5, 10)
    print("THE GRAPH:")
    for source in demo_graph.parse_vertices():
        for outbound_edge in demo_graph.parse_outbound_edges(source):
            print(outbound_edge)
    print("\n")
    successors = find_accessible_vertices_backwards(demo_graph, 1)
    print(successors.keys())
    print("\n")
    shortest_path = find_minimum_length_path(demo_graph, 1, 4)
    print(shortest_path)


main()
|
normal
|
{
"blob_id": "f882589729d74a910d20856d4dc02546fe316e0d",
"index": 2994,
"step-1": "<mask token>\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_accessible_vertices_backwards(graph, end_vertex):\n if end_vertex not in graph.parse_vertices():\n raise ValueError('The end vertex is not in the graph.')\n visited = []\n queue = []\n next_vertex = {}\n distance_to_end = {}\n queue.append(end_vertex)\n visited.append(end_vertex)\n distance_to_end[end_vertex] = 0\n while len(queue) > 0:\n y = queue[0]\n queue = queue[1:]\n for edge in graph.parse_inbound_edges(y):\n if edge.source_id not in visited:\n visited.append(edge.source_id)\n queue.append(edge.source_id)\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\n next_vertex[edge.source_id] = y\n return next_vertex\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\nmain()\n",
"step-4": "from Graph import create_random_graph\n\n\ndef find_accessible_vertices_backwards(graph, end_vertex):\n if end_vertex not in graph.parse_vertices():\n raise ValueError('The end vertex is not in the graph.')\n visited = []\n queue = []\n next_vertex = {}\n distance_to_end = {}\n queue.append(end_vertex)\n visited.append(end_vertex)\n distance_to_end[end_vertex] = 0\n while len(queue) > 0:\n y = queue[0]\n queue = queue[1:]\n for edge in graph.parse_inbound_edges(y):\n if edge.source_id not in visited:\n visited.append(edge.source_id)\n queue.append(edge.source_id)\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\n next_vertex[edge.source_id] = y\n return next_vertex\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\nmain()\n",
"step-5": "from Graph import create_random_graph\r\n\r\n\r\ndef find_accessible_vertices_backwards(graph, end_vertex):\r\n if end_vertex not in graph.parse_vertices():\r\n raise ValueError(\"The end vertex is not in the graph.\")\r\n\r\n visited = []\r\n queue = []\r\n next_vertex = {}\r\n distance_to_end = {}\r\n\r\n queue.append(end_vertex)\r\n visited.append(end_vertex)\r\n distance_to_end[end_vertex] = 0\r\n while len(queue) > 0:\r\n y = queue[0]\r\n queue = queue[1:]\r\n for edge in graph.parse_inbound_edges(y):\r\n if edge.source_id not in visited:\r\n visited.append(edge.source_id)\r\n queue.append(edge.source_id)\r\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\r\n next_vertex[edge.source_id] = y\r\n\r\n return next_vertex\r\n\r\n\r\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\r\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\r\n\r\n if start_vertex not in next_vertex.keys():\r\n raise ValueError(\"There is no path from \" + str(start_vertex) + \" to \" + str(end_vertex))\r\n\r\n path = [start_vertex]\r\n last_vertex = start_vertex\r\n reached_end = False\r\n while not reached_end:\r\n path.append(next_vertex[last_vertex])\r\n last_vertex = next_vertex[last_vertex]\r\n if path[-1] == end_vertex:\r\n reached_end = True\r\n\r\n return path\r\n\r\n\r\ndef main():\r\n random_graph = create_random_graph(5, 10)\r\n\r\n print(\"THE GRAPH:\")\r\n for vertex in random_graph.parse_vertices():\r\n for edge in random_graph.parse_outbound_edges(vertex):\r\n print(edge)\r\n\r\n print(\"\\n\")\r\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\r\n print(next_vertex.keys())\r\n print(\"\\n\")\r\n\r\n path = find_minimum_length_path(random_graph, 1, 4)\r\n print(path)\r\n\r\n\r\nmain()",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(Dialogos.saludo[0])
<|reserved_special_token_0|>
print('\nHola', nombre, 'tienes,', edad, 'años.')
if edad >= 18:
print('¡Tienes edad suficiente para jugar!')
quiere_jugar = input('¿Quieres jugar? ').lower()
if quiere_jugar == 'si' or 'yes' or 'y' or 's':
puede_jugar = True
print('\n¡Comienza la aventura! (HP = 5)\n')
else:
puede_jugar = False
print('Adiós...')
elif edad >= 13:
print('¡Puedes jugar bajo supervisión!')
quiere_jugar = input('¿Quieres jugar? ').lower()
if quiere_jugar == 'si':
puede_jugar = True
print('\n¡Comienza la aventura!\n')
else:
puede_jugar = False
print('Adiós...')
else:
puede_jugar = False
print('¡Eres muy joven para jugar!')
print('Adiós...')
while puede_jugar:
puntuacion = 0
hp = hp_inicial
derrota = False
while puntuacion < puntuacion_necesaria and derrota == False:
dado = Funciones.roll_dice(100)
if dado <= 20:
lugar_actual = bosque
elif dado <= 35:
lugar_actual = ciudad
elif dado <= 65:
lugar_actual = montana
elif dado <= 95:
lugar_actual = lago
else:
lugar_actual = viajero
print(lugar_actual.descripcion)
print('a)', lugar_actual.evento_1.nombre, 'b)', lugar_actual.
evento_2.nombre)
decision = ''
while decision != 'a' and decision != 'b':
decision = input()
if decision != 'a' and decision != 'b':
print('Esa opción no existe.')
if decision == 'a':
evento_actual = lugar_actual.evento_1
else:
evento_actual = lugar_actual.evento_2
print(evento_actual.descripcion)
hp, puntuacion, derrota = Funciones.interactuar(evento_actual,
puntuacion, hp, derrota)
print('\n')
Funciones.comprobar_victoria(derrota, puntuacion)
quiere_jugar = input('\n¿Reintentar? ').lower()
print('\n')
if quiere_jugar != 'si' and 'yes' and 'y' and 's':
puede_jugar = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
puntuacion_necesaria = 10
hp_inicial = 5
eventos = [Evento('dormir', 2, False, -3, 4, Dialogos.descripciones_eventos
[0], Dialogos.descripciones_triunfos[0], Dialogos.
descripciones_castigos[0]), Evento('cazar', 1, False, -2, 3, Dialogos.
descripciones_eventos[1], Dialogos.descripciones_triunfos[1], Dialogos.
descripciones_castigos[1]), Evento('comer', 2, False, 1, 1, Dialogos.
descripciones_eventos[2], Dialogos.descripciones_triunfos[2], Dialogos.
descripciones_castigos[2]), Evento('hablar', 0, True, -3, 2, Dialogos.
descripciones_eventos[3], Dialogos.descripciones_triunfos[3], Dialogos.
descripciones_castigos[3]), Evento('escalar', 0, True, -4, 3, Dialogos.
descripciones_eventos[4], Dialogos.descripciones_triunfos[4], Dialogos.
descripciones_castigos[4]), Evento('rodear', 0, False, -1, 3, Dialogos.
descripciones_eventos[5], Dialogos.descripciones_triunfos[5], Dialogos.
descripciones_castigos[5]), Evento('pescar', 2, False, -4, 2, Dialogos.
descripciones_eventos[6], Dialogos.descripciones_triunfos[6], Dialogos.
descripciones_castigos[6]), Evento('contar chiste', 0, True, 0, 6,
Dialogos.descripciones_eventos[7], Dialogos.descripciones_triunfos[7],
Dialogos.descripciones_castigos[7]), Evento('comprar', 3, False, 0, 6,
Dialogos.descripciones_eventos[8], Dialogos.descripciones_triunfos[8],
Dialogos.descripciones_castigos[8])]
dormir = eventos[0]
cazar = eventos[1]
comer = eventos[2]
hablar = eventos[3]
escalar = eventos[4]
rodear = eventos[5]
pescar = eventos[6]
contar_chiste = eventos[7]
comprar = eventos[8]
lugares = [Lugar(1, 20, Dialogos.descripciones_lugares[0], dormir, cazar),
Lugar(21, 40, Dialogos.descripciones_lugares[1], comer, hablar), Lugar(
41, 75, Dialogos.descripciones_lugares[2], escalar, rodear), Lugar(76,
90, Dialogos.descripciones_lugares[3], dormir, pescar), Lugar(91, 100,
Dialogos.descripciones_lugares[4], contar_chiste, comprar)]
bosque = lugares[0]
ciudad = lugares[1]
montana = lugares[2]
lago = lugares[3]
viajero = lugares[4]
print(Dialogos.saludo[0])
nombre = input(Dialogos.saludo[1])
edad = int(input(Dialogos.saludo[2]))
print('\nHola', nombre, 'tienes,', edad, 'años.')
if edad >= 18:
print('¡Tienes edad suficiente para jugar!')
quiere_jugar = input('¿Quieres jugar? ').lower()
if quiere_jugar == 'si' or 'yes' or 'y' or 's':
puede_jugar = True
print('\n¡Comienza la aventura! (HP = 5)\n')
else:
puede_jugar = False
print('Adiós...')
elif edad >= 13:
print('¡Puedes jugar bajo supervisión!')
quiere_jugar = input('¿Quieres jugar? ').lower()
if quiere_jugar == 'si':
puede_jugar = True
print('\n¡Comienza la aventura!\n')
else:
puede_jugar = False
print('Adiós...')
else:
puede_jugar = False
print('¡Eres muy joven para jugar!')
print('Adiós...')
while puede_jugar:
puntuacion = 0
hp = hp_inicial
derrota = False
while puntuacion < puntuacion_necesaria and derrota == False:
dado = Funciones.roll_dice(100)
if dado <= 20:
lugar_actual = bosque
elif dado <= 35:
lugar_actual = ciudad
elif dado <= 65:
lugar_actual = montana
elif dado <= 95:
lugar_actual = lago
else:
lugar_actual = viajero
print(lugar_actual.descripcion)
print('a)', lugar_actual.evento_1.nombre, 'b)', lugar_actual.
evento_2.nombre)
decision = ''
while decision != 'a' and decision != 'b':
decision = input()
if decision != 'a' and decision != 'b':
print('Esa opción no existe.')
if decision == 'a':
evento_actual = lugar_actual.evento_1
else:
evento_actual = lugar_actual.evento_2
print(evento_actual.descripcion)
hp, puntuacion, derrota = Funciones.interactuar(evento_actual,
puntuacion, hp, derrota)
print('\n')
Funciones.comprobar_victoria(derrota, puntuacion)
quiere_jugar = input('\n¿Reintentar? ').lower()
print('\n')
if quiere_jugar != 'si' and 'yes' and 'y' and 's':
puede_jugar = False
<|reserved_special_token_1|>
from Clases import Lugar
from Clases import Evento
import Dialogos
import Funciones
puntuacion_necesaria = 10
hp_inicial = 5
eventos = [Evento('dormir', 2, False, -3, 4, Dialogos.descripciones_eventos
[0], Dialogos.descripciones_triunfos[0], Dialogos.
descripciones_castigos[0]), Evento('cazar', 1, False, -2, 3, Dialogos.
descripciones_eventos[1], Dialogos.descripciones_triunfos[1], Dialogos.
descripciones_castigos[1]), Evento('comer', 2, False, 1, 1, Dialogos.
descripciones_eventos[2], Dialogos.descripciones_triunfos[2], Dialogos.
descripciones_castigos[2]), Evento('hablar', 0, True, -3, 2, Dialogos.
descripciones_eventos[3], Dialogos.descripciones_triunfos[3], Dialogos.
descripciones_castigos[3]), Evento('escalar', 0, True, -4, 3, Dialogos.
descripciones_eventos[4], Dialogos.descripciones_triunfos[4], Dialogos.
descripciones_castigos[4]), Evento('rodear', 0, False, -1, 3, Dialogos.
descripciones_eventos[5], Dialogos.descripciones_triunfos[5], Dialogos.
descripciones_castigos[5]), Evento('pescar', 2, False, -4, 2, Dialogos.
descripciones_eventos[6], Dialogos.descripciones_triunfos[6], Dialogos.
descripciones_castigos[6]), Evento('contar chiste', 0, True, 0, 6,
Dialogos.descripciones_eventos[7], Dialogos.descripciones_triunfos[7],
Dialogos.descripciones_castigos[7]), Evento('comprar', 3, False, 0, 6,
Dialogos.descripciones_eventos[8], Dialogos.descripciones_triunfos[8],
Dialogos.descripciones_castigos[8])]
dormir = eventos[0]
cazar = eventos[1]
comer = eventos[2]
hablar = eventos[3]
escalar = eventos[4]
rodear = eventos[5]
pescar = eventos[6]
contar_chiste = eventos[7]
comprar = eventos[8]
lugares = [Lugar(1, 20, Dialogos.descripciones_lugares[0], dormir, cazar),
Lugar(21, 40, Dialogos.descripciones_lugares[1], comer, hablar), Lugar(
41, 75, Dialogos.descripciones_lugares[2], escalar, rodear), Lugar(76,
90, Dialogos.descripciones_lugares[3], dormir, pescar), Lugar(91, 100,
Dialogos.descripciones_lugares[4], contar_chiste, comprar)]
bosque = lugares[0]
ciudad = lugares[1]
montana = lugares[2]
lago = lugares[3]
viajero = lugares[4]
print(Dialogos.saludo[0])
nombre = input(Dialogos.saludo[1])
edad = int(input(Dialogos.saludo[2]))
print('\nHola', nombre, 'tienes,', edad, 'años.')
if edad >= 18:
print('¡Tienes edad suficiente para jugar!')
quiere_jugar = input('¿Quieres jugar? ').lower()
if quiere_jugar == 'si' or 'yes' or 'y' or 's':
puede_jugar = True
print('\n¡Comienza la aventura! (HP = 5)\n')
else:
puede_jugar = False
print('Adiós...')
elif edad >= 13:
print('¡Puedes jugar bajo supervisión!')
quiere_jugar = input('¿Quieres jugar? ').lower()
if quiere_jugar == 'si':
puede_jugar = True
print('\n¡Comienza la aventura!\n')
else:
puede_jugar = False
print('Adiós...')
else:
puede_jugar = False
print('¡Eres muy joven para jugar!')
print('Adiós...')
while puede_jugar:
puntuacion = 0
hp = hp_inicial
derrota = False
while puntuacion < puntuacion_necesaria and derrota == False:
dado = Funciones.roll_dice(100)
if dado <= 20:
lugar_actual = bosque
elif dado <= 35:
lugar_actual = ciudad
elif dado <= 65:
lugar_actual = montana
elif dado <= 95:
lugar_actual = lago
else:
lugar_actual = viajero
print(lugar_actual.descripcion)
print('a)', lugar_actual.evento_1.nombre, 'b)', lugar_actual.
evento_2.nombre)
decision = ''
while decision != 'a' and decision != 'b':
decision = input()
if decision != 'a' and decision != 'b':
print('Esa opción no existe.')
if decision == 'a':
evento_actual = lugar_actual.evento_1
else:
evento_actual = lugar_actual.evento_2
print(evento_actual.descripcion)
hp, puntuacion, derrota = Funciones.interactuar(evento_actual,
puntuacion, hp, derrota)
print('\n')
Funciones.comprobar_victoria(derrota, puntuacion)
quiere_jugar = input('\n¿Reintentar? ').lower()
print('\n')
if quiere_jugar != 'si' and 'yes' and 'y' and 's':
puede_jugar = False
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Dice-driven text adventure: the player visits random places, picks one of
two events per place, and tries to reach ``puntuacion_necesaria`` points
before ``derrota`` (defeat) is triggered."""
from Clases import Lugar
from Clases import Evento
import Dialogos
import Funciones

puntuacion_necesaria = 10  # score required to win a run
hp_inicial = 5  # HP each run starts with

# Event catalogue. The positional arguments' semantics are defined by
# Clases.Evento (not visible here) -- confirm before reordering them.
eventos = [
    Evento("dormir", 2, False, -3, 4, Dialogos.descripciones_eventos[0], Dialogos.descripciones_triunfos[0],
           Dialogos.descripciones_castigos[0]),
    Evento("cazar", 1, False, -2, 3, Dialogos.descripciones_eventos[1], Dialogos.descripciones_triunfos[1],
           Dialogos.descripciones_castigos[1]),
    Evento("comer", 2, False, 1, 1, Dialogos.descripciones_eventos[2], Dialogos.descripciones_triunfos[2],
           Dialogos.descripciones_castigos[2]),
    Evento("hablar", 0, True, -3, 2, Dialogos.descripciones_eventos[3], Dialogos.descripciones_triunfos[3],
           Dialogos.descripciones_castigos[3]),
    Evento("escalar", 0, True, -4, 3, Dialogos.descripciones_eventos[4], Dialogos.descripciones_triunfos[4],
           Dialogos.descripciones_castigos[4]),
    Evento("rodear", 0, False, -1, 3, Dialogos.descripciones_eventos[5], Dialogos.descripciones_triunfos[5],
           Dialogos.descripciones_castigos[5]),
    Evento("pescar", 2, False, -4, 2, Dialogos.descripciones_eventos[6], Dialogos.descripciones_triunfos[6],
           Dialogos.descripciones_castigos[6]),
    Evento("contar chiste", 0, True, 0, 6, Dialogos.descripciones_eventos[7], Dialogos.descripciones_triunfos[7],
           Dialogos.descripciones_castigos[7]),
    Evento("comprar", 3, False, 0, 6, Dialogos.descripciones_eventos[8], Dialogos.descripciones_triunfos[8],
           Dialogos.descripciones_castigos[8])
]
dormir = eventos[0]
cazar = eventos[1]
comer = eventos[2]
hablar = eventos[3]
escalar = eventos[4]
rodear = eventos[5]
pescar = eventos[6]
contar_chiste = eventos[7]
comprar = eventos[8]

# Places, each constructed with a dice range and its two selectable events.
lugares = [
    Lugar(1, 20, Dialogos.descripciones_lugares[0], dormir, cazar),
    Lugar(21, 40, Dialogos.descripciones_lugares[1], comer, hablar),
    Lugar(41, 75, Dialogos.descripciones_lugares[2], escalar, rodear),
    Lugar(76, 90, Dialogos.descripciones_lugares[3], dormir, pescar),
    Lugar(91, 100, Dialogos.descripciones_lugares[4], contar_chiste, comprar)
]
bosque = lugares[0]
ciudad = lugares[1]
montana = lugares[2]
lago = lugares[3]
viajero = lugares[4]

# --- Greeting and age gate ---
print(Dialogos.saludo[0])
nombre = input(Dialogos.saludo[1])
edad = int(input(Dialogos.saludo[2]))
print("\nHola", nombre, "tienes,", edad, "años.")
if edad >= 18:
    print("¡Tienes edad suficiente para jugar!")
    quiere_jugar = input("¿Quieres jugar? ").lower()
    # BUG FIX: the original `== "si" or "yes" or "y" or "s"` was always true,
    # because non-empty string literals are truthy; use a membership test.
    if quiere_jugar in ("si", "yes", "y", "s"):
        puede_jugar = True
        print("\n¡Comienza la aventura! (HP = 5)\n")
    else:
        puede_jugar = False
        print("Adiós...")
elif edad >= 13:
    print("¡Puedes jugar bajo supervisión!")
    quiere_jugar = input("¿Quieres jugar? ").lower()
    if quiere_jugar == "si":
        puede_jugar = True
        print("\n¡Comienza la aventura!\n")
    else:
        puede_jugar = False
        print("Adiós...")
else:
    puede_jugar = False
    print("¡Eres muy joven para jugar!")
    print("Adiós...")

# --- Main game loop: one iteration per run, inner loop per turn ---
while puede_jugar:
    puntuacion = 0
    hp = hp_inicial
    derrota = False
    while puntuacion < puntuacion_necesaria and derrota == False:
        # Roll 1-100 and map the result onto a place.
        # NOTE(review): these thresholds (35/65/95) disagree with the ranges
        # passed to Lugar above (40/75/90) -- confirm which set is intended.
        dado = Funciones.roll_dice(100)
        if dado <= 20:
            lugar_actual = bosque
        elif dado <= 35:
            lugar_actual = ciudad
        elif dado <= 65:
            lugar_actual = montana
        elif dado <= 95:
            lugar_actual = lago
        else:
            lugar_actual = viajero
        print(lugar_actual.descripcion)
        print("a)", lugar_actual.evento_1.nombre, "b)", lugar_actual.evento_2.nombre)
        # Keep asking until the player types one of the two valid options.
        decision = ""
        while decision != "a" and decision != "b":
            decision = input()
            if decision != "a" and decision != "b":
                print("Esa opción no existe.")
        if decision == "a":
            evento_actual = lugar_actual.evento_1
        else:
            evento_actual = lugar_actual.evento_2
        print(evento_actual.descripcion)
        (hp, puntuacion, derrota) = Funciones.interactuar(evento_actual, puntuacion, hp, derrota)
        print("\n")
    Funciones.comprobar_victoria(derrota, puntuacion)
    quiere_jugar = input("\n¿Reintentar? ").lower()
    print("\n")
    # BUG FIX: the original `!= "si" and "yes" and "y" and "s"` reduced to
    # just `!= "si"`; treat any non-affirmative answer as a refusal.
    if quiere_jugar not in ("si", "yes", "y", "s"):
        puede_jugar = False
|
flexible
|
{
"blob_id": "fe45fc6cd16be37b320844c5a8b43a964c016dd1",
"index": 5018,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(Dialogos.saludo[0])\n<mask token>\nprint('\\nHola', nombre, 'tienes,', edad, 'años.')\nif edad >= 18:\n print('¡Tienes edad suficiente para jugar!')\n quiere_jugar = input('¿Quieres jugar? ').lower()\n if quiere_jugar == 'si' or 'yes' or 'y' or 's':\n puede_jugar = True\n print('\\n¡Comienza la aventura! (HP = 5)\\n')\n else:\n puede_jugar = False\n print('Adiós...')\nelif edad >= 13:\n print('¡Puedes jugar bajo supervisión!')\n quiere_jugar = input('¿Quieres jugar? ').lower()\n if quiere_jugar == 'si':\n puede_jugar = True\n print('\\n¡Comienza la aventura!\\n')\n else:\n puede_jugar = False\n print('Adiós...')\nelse:\n puede_jugar = False\n print('¡Eres muy joven para jugar!')\n print('Adiós...')\nwhile puede_jugar:\n puntuacion = 0\n hp = hp_inicial\n derrota = False\n while puntuacion < puntuacion_necesaria and derrota == False:\n dado = Funciones.roll_dice(100)\n if dado <= 20:\n lugar_actual = bosque\n elif dado <= 35:\n lugar_actual = ciudad\n elif dado <= 65:\n lugar_actual = montana\n elif dado <= 95:\n lugar_actual = lago\n else:\n lugar_actual = viajero\n print(lugar_actual.descripcion)\n print('a)', lugar_actual.evento_1.nombre, 'b)', lugar_actual.\n evento_2.nombre)\n decision = ''\n while decision != 'a' and decision != 'b':\n decision = input()\n if decision != 'a' and decision != 'b':\n print('Esa opción no existe.')\n if decision == 'a':\n evento_actual = lugar_actual.evento_1\n else:\n evento_actual = lugar_actual.evento_2\n print(evento_actual.descripcion)\n hp, puntuacion, derrota = Funciones.interactuar(evento_actual,\n puntuacion, hp, derrota)\n print('\\n')\n Funciones.comprobar_victoria(derrota, puntuacion)\n quiere_jugar = input('\\n¿Reintentar? ').lower()\n print('\\n')\n if quiere_jugar != 'si' and 'yes' and 'y' and 's':\n puede_jugar = False\n",
"step-3": "<mask token>\npuntuacion_necesaria = 10\nhp_inicial = 5\neventos = [Evento('dormir', 2, False, -3, 4, Dialogos.descripciones_eventos\n [0], Dialogos.descripciones_triunfos[0], Dialogos.\n descripciones_castigos[0]), Evento('cazar', 1, False, -2, 3, Dialogos.\n descripciones_eventos[1], Dialogos.descripciones_triunfos[1], Dialogos.\n descripciones_castigos[1]), Evento('comer', 2, False, 1, 1, Dialogos.\n descripciones_eventos[2], Dialogos.descripciones_triunfos[2], Dialogos.\n descripciones_castigos[2]), Evento('hablar', 0, True, -3, 2, Dialogos.\n descripciones_eventos[3], Dialogos.descripciones_triunfos[3], Dialogos.\n descripciones_castigos[3]), Evento('escalar', 0, True, -4, 3, Dialogos.\n descripciones_eventos[4], Dialogos.descripciones_triunfos[4], Dialogos.\n descripciones_castigos[4]), Evento('rodear', 0, False, -1, 3, Dialogos.\n descripciones_eventos[5], Dialogos.descripciones_triunfos[5], Dialogos.\n descripciones_castigos[5]), Evento('pescar', 2, False, -4, 2, Dialogos.\n descripciones_eventos[6], Dialogos.descripciones_triunfos[6], Dialogos.\n descripciones_castigos[6]), Evento('contar chiste', 0, True, 0, 6,\n Dialogos.descripciones_eventos[7], Dialogos.descripciones_triunfos[7],\n Dialogos.descripciones_castigos[7]), Evento('comprar', 3, False, 0, 6,\n Dialogos.descripciones_eventos[8], Dialogos.descripciones_triunfos[8],\n Dialogos.descripciones_castigos[8])]\ndormir = eventos[0]\ncazar = eventos[1]\ncomer = eventos[2]\nhablar = eventos[3]\nescalar = eventos[4]\nrodear = eventos[5]\npescar = eventos[6]\ncontar_chiste = eventos[7]\ncomprar = eventos[8]\nlugares = [Lugar(1, 20, Dialogos.descripciones_lugares[0], dormir, cazar),\n Lugar(21, 40, Dialogos.descripciones_lugares[1], comer, hablar), Lugar(\n 41, 75, Dialogos.descripciones_lugares[2], escalar, rodear), Lugar(76, \n 90, Dialogos.descripciones_lugares[3], dormir, pescar), Lugar(91, 100,\n Dialogos.descripciones_lugares[4], contar_chiste, comprar)]\nbosque = lugares[0]\nciudad = 
lugares[1]\nmontana = lugares[2]\nlago = lugares[3]\nviajero = lugares[4]\nprint(Dialogos.saludo[0])\nnombre = input(Dialogos.saludo[1])\nedad = int(input(Dialogos.saludo[2]))\nprint('\\nHola', nombre, 'tienes,', edad, 'años.')\nif edad >= 18:\n print('¡Tienes edad suficiente para jugar!')\n quiere_jugar = input('¿Quieres jugar? ').lower()\n if quiere_jugar == 'si' or 'yes' or 'y' or 's':\n puede_jugar = True\n print('\\n¡Comienza la aventura! (HP = 5)\\n')\n else:\n puede_jugar = False\n print('Adiós...')\nelif edad >= 13:\n print('¡Puedes jugar bajo supervisión!')\n quiere_jugar = input('¿Quieres jugar? ').lower()\n if quiere_jugar == 'si':\n puede_jugar = True\n print('\\n¡Comienza la aventura!\\n')\n else:\n puede_jugar = False\n print('Adiós...')\nelse:\n puede_jugar = False\n print('¡Eres muy joven para jugar!')\n print('Adiós...')\nwhile puede_jugar:\n puntuacion = 0\n hp = hp_inicial\n derrota = False\n while puntuacion < puntuacion_necesaria and derrota == False:\n dado = Funciones.roll_dice(100)\n if dado <= 20:\n lugar_actual = bosque\n elif dado <= 35:\n lugar_actual = ciudad\n elif dado <= 65:\n lugar_actual = montana\n elif dado <= 95:\n lugar_actual = lago\n else:\n lugar_actual = viajero\n print(lugar_actual.descripcion)\n print('a)', lugar_actual.evento_1.nombre, 'b)', lugar_actual.\n evento_2.nombre)\n decision = ''\n while decision != 'a' and decision != 'b':\n decision = input()\n if decision != 'a' and decision != 'b':\n print('Esa opción no existe.')\n if decision == 'a':\n evento_actual = lugar_actual.evento_1\n else:\n evento_actual = lugar_actual.evento_2\n print(evento_actual.descripcion)\n hp, puntuacion, derrota = Funciones.interactuar(evento_actual,\n puntuacion, hp, derrota)\n print('\\n')\n Funciones.comprobar_victoria(derrota, puntuacion)\n quiere_jugar = input('\\n¿Reintentar? ').lower()\n print('\\n')\n if quiere_jugar != 'si' and 'yes' and 'y' and 's':\n puede_jugar = False\n",
"step-4": "from Clases import Lugar\nfrom Clases import Evento\nimport Dialogos\nimport Funciones\npuntuacion_necesaria = 10\nhp_inicial = 5\neventos = [Evento('dormir', 2, False, -3, 4, Dialogos.descripciones_eventos\n [0], Dialogos.descripciones_triunfos[0], Dialogos.\n descripciones_castigos[0]), Evento('cazar', 1, False, -2, 3, Dialogos.\n descripciones_eventos[1], Dialogos.descripciones_triunfos[1], Dialogos.\n descripciones_castigos[1]), Evento('comer', 2, False, 1, 1, Dialogos.\n descripciones_eventos[2], Dialogos.descripciones_triunfos[2], Dialogos.\n descripciones_castigos[2]), Evento('hablar', 0, True, -3, 2, Dialogos.\n descripciones_eventos[3], Dialogos.descripciones_triunfos[3], Dialogos.\n descripciones_castigos[3]), Evento('escalar', 0, True, -4, 3, Dialogos.\n descripciones_eventos[4], Dialogos.descripciones_triunfos[4], Dialogos.\n descripciones_castigos[4]), Evento('rodear', 0, False, -1, 3, Dialogos.\n descripciones_eventos[5], Dialogos.descripciones_triunfos[5], Dialogos.\n descripciones_castigos[5]), Evento('pescar', 2, False, -4, 2, Dialogos.\n descripciones_eventos[6], Dialogos.descripciones_triunfos[6], Dialogos.\n descripciones_castigos[6]), Evento('contar chiste', 0, True, 0, 6,\n Dialogos.descripciones_eventos[7], Dialogos.descripciones_triunfos[7],\n Dialogos.descripciones_castigos[7]), Evento('comprar', 3, False, 0, 6,\n Dialogos.descripciones_eventos[8], Dialogos.descripciones_triunfos[8],\n Dialogos.descripciones_castigos[8])]\ndormir = eventos[0]\ncazar = eventos[1]\ncomer = eventos[2]\nhablar = eventos[3]\nescalar = eventos[4]\nrodear = eventos[5]\npescar = eventos[6]\ncontar_chiste = eventos[7]\ncomprar = eventos[8]\nlugares = [Lugar(1, 20, Dialogos.descripciones_lugares[0], dormir, cazar),\n Lugar(21, 40, Dialogos.descripciones_lugares[1], comer, hablar), Lugar(\n 41, 75, Dialogos.descripciones_lugares[2], escalar, rodear), Lugar(76, \n 90, Dialogos.descripciones_lugares[3], dormir, pescar), Lugar(91, 100,\n 
Dialogos.descripciones_lugares[4], contar_chiste, comprar)]\nbosque = lugares[0]\nciudad = lugares[1]\nmontana = lugares[2]\nlago = lugares[3]\nviajero = lugares[4]\nprint(Dialogos.saludo[0])\nnombre = input(Dialogos.saludo[1])\nedad = int(input(Dialogos.saludo[2]))\nprint('\\nHola', nombre, 'tienes,', edad, 'años.')\nif edad >= 18:\n print('¡Tienes edad suficiente para jugar!')\n quiere_jugar = input('¿Quieres jugar? ').lower()\n if quiere_jugar == 'si' or 'yes' or 'y' or 's':\n puede_jugar = True\n print('\\n¡Comienza la aventura! (HP = 5)\\n')\n else:\n puede_jugar = False\n print('Adiós...')\nelif edad >= 13:\n print('¡Puedes jugar bajo supervisión!')\n quiere_jugar = input('¿Quieres jugar? ').lower()\n if quiere_jugar == 'si':\n puede_jugar = True\n print('\\n¡Comienza la aventura!\\n')\n else:\n puede_jugar = False\n print('Adiós...')\nelse:\n puede_jugar = False\n print('¡Eres muy joven para jugar!')\n print('Adiós...')\nwhile puede_jugar:\n puntuacion = 0\n hp = hp_inicial\n derrota = False\n while puntuacion < puntuacion_necesaria and derrota == False:\n dado = Funciones.roll_dice(100)\n if dado <= 20:\n lugar_actual = bosque\n elif dado <= 35:\n lugar_actual = ciudad\n elif dado <= 65:\n lugar_actual = montana\n elif dado <= 95:\n lugar_actual = lago\n else:\n lugar_actual = viajero\n print(lugar_actual.descripcion)\n print('a)', lugar_actual.evento_1.nombre, 'b)', lugar_actual.\n evento_2.nombre)\n decision = ''\n while decision != 'a' and decision != 'b':\n decision = input()\n if decision != 'a' and decision != 'b':\n print('Esa opción no existe.')\n if decision == 'a':\n evento_actual = lugar_actual.evento_1\n else:\n evento_actual = lugar_actual.evento_2\n print(evento_actual.descripcion)\n hp, puntuacion, derrota = Funciones.interactuar(evento_actual,\n puntuacion, hp, derrota)\n print('\\n')\n Funciones.comprobar_victoria(derrota, puntuacion)\n quiere_jugar = input('\\n¿Reintentar? 
').lower()\n print('\\n')\n if quiere_jugar != 'si' and 'yes' and 'y' and 's':\n puede_jugar = False\n",
"step-5": "# -*- coding: utf-8 -*-\r\nfrom Clases import Lugar\r\nfrom Clases import Evento\r\n\r\nimport Dialogos\r\nimport Funciones\r\n\r\npuntuacion_necesaria = 10\r\nhp_inicial = 5\r\n\r\n\r\neventos = [\r\n Evento(\"dormir\", 2, False, -3, 4, Dialogos.descripciones_eventos[0], Dialogos.descripciones_triunfos[0],\r\n Dialogos.descripciones_castigos[0]),\r\n Evento(\"cazar\", 1, False, -2, 3, Dialogos.descripciones_eventos[1], Dialogos.descripciones_triunfos[1],\r\n Dialogos.descripciones_castigos[1]),\r\n Evento(\"comer\", 2, False, 1, 1, Dialogos.descripciones_eventos[2], Dialogos.descripciones_triunfos[2],\r\n Dialogos.descripciones_castigos[2]),\r\n Evento(\"hablar\", 0, True, -3, 2, Dialogos.descripciones_eventos[3], Dialogos.descripciones_triunfos[3],\r\n Dialogos.descripciones_castigos[3]),\r\n Evento(\"escalar\", 0, True, -4, 3, Dialogos.descripciones_eventos[4], Dialogos.descripciones_triunfos[4],\r\n Dialogos.descripciones_castigos[4]),\r\n Evento(\"rodear\", 0, False, -1, 3, Dialogos.descripciones_eventos[5], Dialogos.descripciones_triunfos[5],\r\n Dialogos.descripciones_castigos[5]),\r\n Evento(\"pescar\", 2, False, -4, 2, Dialogos.descripciones_eventos[6], Dialogos.descripciones_triunfos[6],\r\n Dialogos.descripciones_castigos[6]),\r\n Evento(\"contar chiste\", 0, True, 0, 6, Dialogos.descripciones_eventos[7], Dialogos.descripciones_triunfos[7],\r\n Dialogos.descripciones_castigos[7]),\r\n Evento(\"comprar\", 3, False, 0, 6, Dialogos.descripciones_eventos[8], Dialogos.descripciones_triunfos[8],\r\n Dialogos.descripciones_castigos[8])\r\n]\r\n\r\ndormir = eventos[0]\r\ncazar = eventos[1]\r\ncomer = eventos[2]\r\nhablar = eventos[3]\r\nescalar = eventos[4]\r\nrodear = eventos[5]\r\npescar = eventos[6]\r\ncontar_chiste = eventos[7]\r\ncomprar = eventos[8]\r\n\r\nlugares = [\r\n Lugar(1, 20, Dialogos.descripciones_lugares[0], dormir, cazar),\r\n Lugar(21, 40, Dialogos.descripciones_lugares[1], comer, hablar),\r\n Lugar(41, 75, 
Dialogos.descripciones_lugares[2], escalar, rodear),\r\n Lugar(76, 90, Dialogos.descripciones_lugares[3], dormir, pescar),\r\n Lugar(91, 100, Dialogos.descripciones_lugares[4], contar_chiste, comprar)\r\n]\r\n\r\nbosque = lugares[0]\r\nciudad = lugares[1]\r\nmontana = lugares[2]\r\nlago = lugares[3]\r\nviajero = lugares[4]\r\n\r\nprint(Dialogos.saludo[0])\r\nnombre = input(Dialogos.saludo[1])\r\nedad = int(input(Dialogos.saludo[2]))\r\n\r\nprint(\"\\nHola\", nombre, \"tienes,\", edad, \"años.\")\r\n\r\nif edad >= 18:\r\n print(\"¡Tienes edad suficiente para jugar!\")\r\n quiere_jugar = input(\"¿Quieres jugar? \").lower()\r\n\r\n if quiere_jugar == \"si\" or \"yes\" or \"y\" or \"s\":\r\n puede_jugar = True\r\n print(\"\\n¡Comienza la aventura! (HP = 5)\\n\")\r\n\r\n else:\r\n puede_jugar = False\r\n print(\"Adiós...\")\r\n\r\nelif edad >= 13:\r\n print(\"¡Puedes jugar bajo supervisión!\")\r\n quiere_jugar = input(\"¿Quieres jugar? \").lower()\r\n\r\n if quiere_jugar == \"si\":\r\n puede_jugar = True\r\n print(\"\\n¡Comienza la aventura!\\n\")\r\n else:\r\n puede_jugar = False\r\n print(\"Adiós...\")\r\n\r\nelse:\r\n puede_jugar = False\r\n print(\"¡Eres muy joven para jugar!\")\r\n print(\"Adiós...\")\r\n\r\nwhile puede_jugar:\r\n puntuacion = 0\r\n hp = hp_inicial\r\n derrota = False\r\n\r\n while puntuacion < puntuacion_necesaria and derrota == False:\r\n dado = Funciones.roll_dice(100)\r\n if dado <= 20:\r\n lugar_actual = bosque\r\n elif dado <= 35:\r\n lugar_actual = ciudad\r\n elif dado <= 65:\r\n lugar_actual = montana\r\n elif dado <= 95:\r\n lugar_actual = lago\r\n else:\r\n lugar_actual = viajero\r\n\r\n print(lugar_actual.descripcion)\r\n print(\"a)\", lugar_actual.evento_1.nombre, \"b)\", lugar_actual.evento_2.nombre)\r\n decision = \"\"\r\n\r\n while decision != \"a\" and decision != \"b\":\r\n decision = input()\r\n\r\n if decision != \"a\" and decision != \"b\":\r\n print(\"Esa opción no existe.\")\r\n\r\n if decision == \"a\":\r\n evento_actual = 
lugar_actual.evento_1\r\n else:\r\n evento_actual = lugar_actual.evento_2\r\n\r\n print(evento_actual.descripcion)\r\n\r\n (hp, puntuacion, derrota) = Funciones.interactuar(evento_actual, puntuacion, hp, derrota)\r\n print(\"\\n\")\r\n\r\n Funciones.comprobar_victoria(derrota, puntuacion)\r\n\r\n quiere_jugar = input(\"\\n¿Reintentar? \").lower()\r\n print(\"\\n\")\r\n if quiere_jugar != \"si\" and \"yes\" and \"y\" and \"s\":\r\n puede_jugar = False",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pymel.core as PM
import socket
def getShadingGroupMembership():
    '''
    Get a dictionary of shading group set information
    {'shadingGroup': [assignment1, assignment2...]}

    Shading groups are taken from the connections of the current
    selection (PM.listConnections), not from the whole scene.
    '''
    sgs = PM.listConnections(s= 1, t='shadingEngine')
    # Map each shading engine's name to its flattened member list
    # (meshes and per-face components).
    return {sg.name(): sg.members(flatten=True) for sg in sgs}
def remoteMaye(msg):
    # Send one command string over the global socket opened in doJob().
    # NOTE(review): assumes a Python 2 runtime -- socket.send() requires
    # bytes on Python 3; `maya` must already be connected before calling.
    global maya
    maya.send(msg)
def vmtl_nameMap(name):
    '''
    Map a rig-namespace shader name to its render material name in the
    richPeopleSuck namespace.

    Eye shaders in the white list all collapse to the shared white eye
    surface material; names with no mapping pass through unchanged.
    '''
    # Eye shaders that all share the same white surface material.
    whiteList = ['woman_Rig:surfaceShader1',
                 'lady_Rig:surfaceShader1',
                 'richman_rigging_master:richman_spot',
                 'oldman_Rig:surfaceShader1']
    # One-to-one shader renames (rig namespace -> richPeopleSuck namespace).
    # Replaces the original 10-branch if-chain; no mapped value is itself a
    # key or a white-list entry, so a single lookup is equivalent.
    renames = {
        'oldman_Rig:VRayMtl2': 'richPeopleSuck:oldman_cloth_vmtl',
        'oldman_Rig:VRayMtl3': 'richPeopleSuck:oldman_skin_vmtl',
        'oldman_Rig:VRayMtl4': 'richPeopleSuck:oldman_glass_vmtl',
        'lady_Rig:VRayMtl2': 'richPeopleSuck:lady_cloth_vmtl',
        'lady_Rig:VRayMtl1': 'richPeopleSuck:lady_skin_vmtl',
        'woman_Rig:VRayMtl1': 'richPeopleSuck:woman_cloth_vmtl',
        'woman_Rig:VRayMtl2': 'richPeopleSuck:woman_skin_vmtl',
        'richman_rigging_master:VRayMtl2': 'richPeopleSuck:richman_cloth_vmtl',
        'richman_rigging_master:VRayMtl1': 'richPeopleSuck:richman_skin_vmtl',
        'richman_rigging_master:surfaceShader3': 'richPeopleSuck:maneye_black_surface',
    }
    if name in whiteList:
        return 'richPeopleSuck:maneye_white_surface'
    return renames.get(name, name)
def doJob(port):
    # Connect to a Maya command port on localhost and replay shader
    # assignments there: for each shading group membership, build a
    # select+hyperShade command string per mesh (or per face) and send it.
    # NOTE(review): `cmds` is never imported in this module -- presumably
    # this runs inside a Maya session where maya.cmds is in scope; confirm.

    host = "127.0.0.1"
    global maya
    # Raw TCP socket to Maya's commandPort; global so remoteMaye() can reuse it.
    maya = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    maya.connect( (host, port) )

    mtlDict = getShadingGroupMembership()

    for meshList in mtlDict.keys():
        # Shader node feeding this shading group's surfaceShader slot.
        vmtl = cmds.listConnections(meshList + '.surfaceShader', s= 1)[0]
        if mtlDict[meshList]:
            for mesh in mtlDict[meshList]:
                msg = ''
                target = ''
                if '.' in str(mesh):
                    # Per-face assignment, e.g. "pShape.f[1,3,7]": split the
                    # face index list into one select+hyperShade pair per face.
                    faceList = []
                    faceStr = str(mesh).split('.f')[1].replace('[', '').replace(']', '')
                    if ',' in faceStr:
                        faceList = faceStr.split(',')
                    else:
                        faceList = [faceStr]
                    for face in faceList:
                        target = str(mesh).split('.')[0] + '.f[' + face + ']'
                        try:
                            msg += 'cmds.select("' + target + '", r= 1)\n'
                            msg += 'cmds.hyperShade(a= "' + vmtl_nameMap(vmtl) + '")\n'
                        except:
                            # Fallback for targets containing extra ':'
                            # (namespaced face ranges); bare except kept as-is.
                            if len(target.split(':')) > 1:
                                target_1 = ':'.join(target.split(':')[0:2]) + ']'
                                target_2 = ':'.join([target.split(':')[0], target.split(':')[2]])
                                try:
                                    msg += 'cmds.select("' + target_1 + '", r= 1)\n'
                                    msg += 'cmds.hyperShade(a= "' + vmtl_nameMap(vmtl) + '")\n'
                                except:
                                    print '+++++++++++++++++++++++++++++++++++++\n+++++++++++++++++++++++++++++++++++++'
                else:
                    # Whole-object assignment (no face component in the name).
                    target = str(mesh)
                    msg += 'cmds.select("' + target + '", r= 1)\n'
                    msg += 'cmds.hyperShade(a= "' + vmtl_nameMap(vmtl) + '")\n'

                remoteMaye(msg)

    maya.close()
|
normal
|
{
"blob_id": "4e38ad17ad66ac71b0df3cbcaa33cb546e96ce9d",
"index": 2257,
"step-1": "import pymel.core as PM\nimport socket\n\ndef getShadingGroupMembership():\n '''\n Get a dictionary of shading group set information\n {'shadingGroup': [assignmnet1, assignment2...]}\n '''\n result = {}\n #sgs = PM.ls(sl= 1, et='shadingEngine')\n sgs = PM.listConnections(s= 1, t='shadingEngine')\n for sg in sgs:\n result[sg.name()] = sg.members(flatten=True)\n return result\n\ndef remoteMaye(msg):\n global maya\n maya.send(msg)\n\ndef vmtl_nameMap(name):\n whiteList = ['woman_Rig:surfaceShader1',\n 'lady_Rig:surfaceShader1',\n 'richman_rigging_master:richman_spot',\n 'oldman_Rig:surfaceShader1']\n if name == 'oldman_Rig:VRayMtl2':\n name = 'richPeopleSuck:oldman_cloth_vmtl'\n if name == 'oldman_Rig:VRayMtl3':\n name = 'richPeopleSuck:oldman_skin_vmtl'\n if name == 'oldman_Rig:VRayMtl4':\n name = 'richPeopleSuck:oldman_glass_vmtl'\n if name == 'lady_Rig:VRayMtl2':\n name = 'richPeopleSuck:lady_cloth_vmtl'\n if name == 'lady_Rig:VRayMtl1':\n name = 'richPeopleSuck:lady_skin_vmtl'\n if name == 'woman_Rig:VRayMtl1':\n name = 'richPeopleSuck:woman_cloth_vmtl'\n if name == 'woman_Rig:VRayMtl2':\n name = 'richPeopleSuck:woman_skin_vmtl'\n if name == 'richman_rigging_master:VRayMtl2':\n name = 'richPeopleSuck:richman_cloth_vmtl'\n if name == 'richman_rigging_master:VRayMtl1':\n name = 'richPeopleSuck:richman_skin_vmtl'\n if name == 'richman_rigging_master:surfaceShader3':\n name = 'richPeopleSuck:maneye_black_surface'\n if name in whiteList:\n name = 'richPeopleSuck:maneye_white_surface'\n\n return name\n\n\ndef doJob(port):\n\n host = \"127.0.0.1\"\n global maya\n maya = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n maya.connect( (host, port) )\n\n mtlDict = getShadingGroupMembership()\n\n for meshList in mtlDict.keys():\n vmtl = cmds.listConnections(meshList + '.surfaceShader', s= 1)[0]\n if mtlDict[meshList]:\n for mesh in mtlDict[meshList]:\n msg = ''\n target = ''\n if '.' 
in str(mesh):\n faceList = []\n faceStr = str(mesh).split('.f')[1].replace('[', '').replace(']', '')\n if ',' in faceStr:\n faceList = faceStr.split(',')\n else:\n faceList = [faceStr]\n for face in faceList:\n target = str(mesh).split('.')[0] + '.f[' + face + ']'\n try:\n msg += 'cmds.select(\"' + target + '\", r= 1)\\n'\n msg += 'cmds.hyperShade(a= \"' + vmtl_nameMap(vmtl) + '\")\\n'\n except:\n if len(target.split(':')) > 1:\n target_1 = ':'.join(target.split(':')[0:2]) + ']'\n target_2 = ':'.join([target.split(':')[0], target.split(':')[2]])\n try:\n msg += 'cmds.select(\"' + target_1 + '\", r= 1)\\n'\n msg += 'cmds.hyperShade(a= \"' + vmtl_nameMap(vmtl) + '\")\\n'\n except:\n print '+++++++++++++++++++++++++++++++++++++\\n+++++++++++++++++++++++++++++++++++++'\n else:\n target = str(mesh)\n msg += 'cmds.select(\"' + target + '\", r= 1)\\n'\n msg += 'cmds.hyperShade(a= \"' + vmtl_nameMap(vmtl) + '\")\\n'\n\n remoteMaye(msg)\n\n maya.close()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Beacon:
def __init__(self, pos, sensor) ->None:
self.pos = pos
self.sensor = sensor
<|reserved_special_token_0|>
def __repr__(self) ->str:
return f'{self}'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def y(self):
return self.pos[1]
<|reserved_special_token_0|>
class Sensor:
def __init__(self, pos, beacon) ->None:
self.pos = pos
self.beacon = Beacon(beacon, self)
self.range = self.distance_to(self.beacon.pos)
self.min_x = self.x - self.range
self.min_y = self.y - self.range
self.max_x = self.x + self.range
self.max_y = self.y + self.range
def __str__(self) ->str:
return f'S{self.pos}'
def __repr__(self) ->str:
return f'{self}'
def __hash__(self) ->int:
return hash(self.pos)
@staticmethod
def from_text(text):
text = text.split()
sx, sy = int(text[2][2:-1]), int(text[3][2:-1])
bx, by = int(text[-2][2:-1]), int(text[-1][2:])
return Sensor((sx, sy), (bx, by))
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
def distance_to(self, pos):
x, y = pos
return abs(self.x - x) + abs(self.y - y)
def covers(self, pos):
return self.distance_to(pos) <= self.range
class Grid:
def __init__(self, sensors, beacons) ->None:
self.sensors = sensors
self.beacons = beacons
@property
def min_x(self):
bx = min([b.x for b in self.beacons.values()])
sx = min([s.min_x for s in self.sensors.values()])
return min(bx, sx)
@property
def max_x(self):
bx = max([b.x for b in self.beacons.values()])
sx = max([s.max_x for s in self.sensors.values()])
return max(bx, sx)
@property
def min_y(self):
by = min([b.y for b in self.beacons.values()])
sy = min([s.min_y for s in self.sensors.values()])
return min(by, sy)
@property
def max_y(self):
by = max([b.y for b in self.beacons.values()])
sy = max([s.max_y for s in self.sensors.values()])
return max(by, sy)
def __getitem__(self, idx):
row = []
for x in range(self.min_x, self.max_x):
pos = x, idx
if pos in self.beacons:
row.append((x, self.beacons[pos]))
elif pos in self.sensors:
row.append((x, self.sensors[pos]))
else:
row.append((x, None))
return row
def __iter__(self):
self.__row = min(self.ys)
return self
def __next__(self):
row = None
if self.__row <= max(self.ys):
row = self[self.__row]
self.__row += 1
else:
raise StopIteration
return row
def is_covered(self, pos):
for s in self.sensors.values():
if s.covers(pos):
return True
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Beacon:
def __init__(self, pos, sensor) ->None:
self.pos = pos
self.sensor = sensor
def __str__(self) ->str:
return f'B{self.pos}'
def __repr__(self) ->str:
return f'{self}'
def __hash__(self) ->int:
return hash(self.pos)
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
@property
def distance_to(self, pos):
x, y = pos
return abs(self.x - x) + (self.y - y)
class Sensor:
def __init__(self, pos, beacon) ->None:
self.pos = pos
self.beacon = Beacon(beacon, self)
self.range = self.distance_to(self.beacon.pos)
self.min_x = self.x - self.range
self.min_y = self.y - self.range
self.max_x = self.x + self.range
self.max_y = self.y + self.range
def __str__(self) ->str:
return f'S{self.pos}'
def __repr__(self) ->str:
return f'{self}'
def __hash__(self) ->int:
return hash(self.pos)
@staticmethod
def from_text(text):
text = text.split()
sx, sy = int(text[2][2:-1]), int(text[3][2:-1])
bx, by = int(text[-2][2:-1]), int(text[-1][2:])
return Sensor((sx, sy), (bx, by))
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
def distance_to(self, pos):
x, y = pos
return abs(self.x - x) + abs(self.y - y)
def covers(self, pos):
return self.distance_to(pos) <= self.range
class Grid:
def __init__(self, sensors, beacons) ->None:
self.sensors = sensors
self.beacons = beacons
@property
def min_x(self):
bx = min([b.x for b in self.beacons.values()])
sx = min([s.min_x for s in self.sensors.values()])
return min(bx, sx)
@property
def max_x(self):
bx = max([b.x for b in self.beacons.values()])
sx = max([s.max_x for s in self.sensors.values()])
return max(bx, sx)
@property
def min_y(self):
by = min([b.y for b in self.beacons.values()])
sy = min([s.min_y for s in self.sensors.values()])
return min(by, sy)
@property
def max_y(self):
by = max([b.y for b in self.beacons.values()])
sy = max([s.max_y for s in self.sensors.values()])
return max(by, sy)
def __getitem__(self, idx):
row = []
for x in range(self.min_x, self.max_x):
pos = x, idx
if pos in self.beacons:
row.append((x, self.beacons[pos]))
elif pos in self.sensors:
row.append((x, self.sensors[pos]))
else:
row.append((x, None))
return row
def __iter__(self):
self.__row = min(self.ys)
return self
def __next__(self):
row = None
if self.__row <= max(self.ys):
row = self[self.__row]
self.__row += 1
else:
raise StopIteration
return row
def is_covered(self, pos):
for s in self.sensors.values():
if s.covers(pos):
return True
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Beacon:
def __init__(self, pos, sensor) ->None:
self.pos = pos
self.sensor = sensor
def __str__(self) ->str:
return f'B{self.pos}'
def __repr__(self) ->str:
return f'{self}'
def __hash__(self) ->int:
return hash(self.pos)
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
@property
def distance_to(self, pos):
x, y = pos
return abs(self.x - x) + (self.y - y)
class Sensor:
def __init__(self, pos, beacon) ->None:
self.pos = pos
self.beacon = Beacon(beacon, self)
self.range = self.distance_to(self.beacon.pos)
self.min_x = self.x - self.range
self.min_y = self.y - self.range
self.max_x = self.x + self.range
self.max_y = self.y + self.range
def __str__(self) ->str:
return f'S{self.pos}'
def __repr__(self) ->str:
return f'{self}'
def __hash__(self) ->int:
return hash(self.pos)
@staticmethod
def from_text(text):
text = text.split()
sx, sy = int(text[2][2:-1]), int(text[3][2:-1])
bx, by = int(text[-2][2:-1]), int(text[-1][2:])
return Sensor((sx, sy), (bx, by))
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
def distance_to(self, pos):
x, y = pos
return abs(self.x - x) + abs(self.y - y)
def covers(self, pos):
return self.distance_to(pos) <= self.range
class Grid:
def __init__(self, sensors, beacons) ->None:
self.sensors = sensors
self.beacons = beacons
@property
def min_x(self):
bx = min([b.x for b in self.beacons.values()])
sx = min([s.min_x for s in self.sensors.values()])
return min(bx, sx)
@property
def max_x(self):
bx = max([b.x for b in self.beacons.values()])
sx = max([s.max_x for s in self.sensors.values()])
return max(bx, sx)
@property
def min_y(self):
by = min([b.y for b in self.beacons.values()])
sy = min([s.min_y for s in self.sensors.values()])
return min(by, sy)
@property
def max_y(self):
by = max([b.y for b in self.beacons.values()])
sy = max([s.max_y for s in self.sensors.values()])
return max(by, sy)
def __getitem__(self, idx):
row = []
for x in range(self.min_x, self.max_x):
pos = x, idx
if pos in self.beacons:
row.append((x, self.beacons[pos]))
elif pos in self.sensors:
row.append((x, self.sensors[pos]))
else:
row.append((x, None))
return row
def __iter__(self):
self.__row = min(self.ys)
return self
def __next__(self):
row = None
if self.__row <= max(self.ys):
row = self[self.__row]
self.__row += 1
else:
raise StopIteration
return row
def is_covered(self, pos):
for s in self.sensors.values():
if s.covers(pos):
return True
return False
<|reserved_special_token_0|>
def print_row(grid, row_idx):
r = ''
for x, v in grid[row_idx]:
if isinstance(v, Beacon):
r += 'B'
elif isinstance(v, Sensor):
r += 'S'
elif grid.is_covered((x, row_idx)):
r += '#'
else:
r += '.'
return r
<|reserved_special_token_0|>
def walk_perimeters(grid):
for sensor in grid.sensors.values():
for dx in range(sensor.range + 2):
dy = sensor.range + 1 - dx
for signx, signy in [(-1, -1), (-1, 1), (1, -1), (1, 1)]:
x = sensor.x + dx * signx
y = sensor.y + dy * signy
if not (0 <= x <= MAX_X and 0 <= y <= MAX_Y):
continue
if not grid.is_covered((x, y)):
return x * 4000000 + y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from pathlib import Path
file = Path(__file__).parent / 'input.txt'
Y = 2000000
MAX_X = 4000000
MIN_X = 0
MAX_Y = 4000000
MIN_Y = 0
text = file.read_text().splitlines()
class Beacon:
def __init__(self, pos, sensor) ->None:
self.pos = pos
self.sensor = sensor
def __str__(self) ->str:
return f'B{self.pos}'
def __repr__(self) ->str:
return f'{self}'
def __hash__(self) ->int:
return hash(self.pos)
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
@property
def distance_to(self, pos):
x, y = pos
return abs(self.x - x) + (self.y - y)
class Sensor:
def __init__(self, pos, beacon) ->None:
self.pos = pos
self.beacon = Beacon(beacon, self)
self.range = self.distance_to(self.beacon.pos)
self.min_x = self.x - self.range
self.min_y = self.y - self.range
self.max_x = self.x + self.range
self.max_y = self.y + self.range
def __str__(self) ->str:
return f'S{self.pos}'
def __repr__(self) ->str:
return f'{self}'
def __hash__(self) ->int:
return hash(self.pos)
@staticmethod
def from_text(text):
text = text.split()
sx, sy = int(text[2][2:-1]), int(text[3][2:-1])
bx, by = int(text[-2][2:-1]), int(text[-1][2:])
return Sensor((sx, sy), (bx, by))
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
def distance_to(self, pos):
x, y = pos
return abs(self.x - x) + abs(self.y - y)
def covers(self, pos):
return self.distance_to(pos) <= self.range
class Grid:
def __init__(self, sensors, beacons) ->None:
self.sensors = sensors
self.beacons = beacons
@property
def min_x(self):
bx = min([b.x for b in self.beacons.values()])
sx = min([s.min_x for s in self.sensors.values()])
return min(bx, sx)
@property
def max_x(self):
bx = max([b.x for b in self.beacons.values()])
sx = max([s.max_x for s in self.sensors.values()])
return max(bx, sx)
@property
def min_y(self):
by = min([b.y for b in self.beacons.values()])
sy = min([s.min_y for s in self.sensors.values()])
return min(by, sy)
@property
def max_y(self):
by = max([b.y for b in self.beacons.values()])
sy = max([s.max_y for s in self.sensors.values()])
return max(by, sy)
def __getitem__(self, idx):
row = []
for x in range(self.min_x, self.max_x):
pos = x, idx
if pos in self.beacons:
row.append((x, self.beacons[pos]))
elif pos in self.sensors:
row.append((x, self.sensors[pos]))
else:
row.append((x, None))
return row
def __iter__(self):
self.__row = min(self.ys)
return self
def __next__(self):
row = None
if self.__row <= max(self.ys):
row = self[self.__row]
self.__row += 1
else:
raise StopIteration
return row
def is_covered(self, pos):
for s in self.sensors.values():
if s.covers(pos):
return True
return False
beacons = {}
sensors = {}
for line in text:
s = Sensor.from_text(line)
beacons[s.beacon.pos] = s.beacon
sensors[s.pos] = s
grid = Grid(sensors, beacons)
def print_row(grid, row_idx):
r = ''
for x, v in grid[row_idx]:
if isinstance(v, Beacon):
r += 'B'
elif isinstance(v, Sensor):
r += 'S'
elif grid.is_covered((x, row_idx)):
r += '#'
else:
r += '.'
return r
def count_covered(prow):
count = 0
for c in prow:
if c == '#':
count += 1
return count
print('Part 1:', count_covered(print_row(grid, Y)))
def walk_perimeters(grid):
for sensor in grid.sensors.values():
for dx in range(sensor.range + 2):
dy = sensor.range + 1 - dx
for signx, signy in [(-1, -1), (-1, 1), (1, -1), (1, 1)]:
x = sensor.x + dx * signx
y = sensor.y + dy * signy
if not (0 <= x <= MAX_X and 0 <= y <= MAX_Y):
continue
if not grid.is_covered((x, y)):
return x * 4000000 + y
print('Part 2:', walk_perimeters(grid))
<|reserved_special_token_1|>
from pathlib import Path
file = Path(__file__).parent / 'input.txt'
# Row inspected for part 1 and the inclusive search bounds for part 2
# (Advent of Code 2022, day 15 puzzle parameters).
Y = 2000000
MAX_X = 4000000
MIN_X = 0
MAX_Y = 4000000
MIN_Y = 0
# Alternative configuration for the small worked example from the puzzle.
# file = Path(__file__).parent / 'test_input.txt'
# Y = 10
# MAX_X = 20
# MIN_X = 0
# MAX_Y = 20
# MIN_Y = 0
# One "Sensor at x=..., y=...: closest beacon is at x=..., y=..." line each.
text = file.read_text().splitlines()
class Beacon():
    """A beacon at a grid position, linked back to the sensor that found it."""

    def __init__(self, pos, sensor) -> None:
        self.pos = pos        # (x, y) tuple
        self.sensor = sensor  # owning Sensor (or None)

    def __str__(self) -> str:
        return f"B{self.pos}"

    def __repr__(self) -> str:
        return f"{self}"

    def __hash__(self) -> int:
        return hash(self.pos)

    @property
    def x(self):
        return self.pos[0]

    @property
    def y(self):
        return self.pos[1]

    def distance_to(self, pos):
        """Manhattan distance from this beacon to *pos*.

        Bug fixes vs. the original: the stray @property decorator is
        removed (the method takes an argument, so attribute access raised
        TypeError) and abs() is now applied to the y term as well.
        """
        x, y = pos
        return abs(self.x - x) + abs(self.y - y)
class Sensor():
    """A sensor plus its closest beacon.

    ``range`` is the Manhattan distance to that beacon: every cell within
    that distance is guaranteed beacon-free (except the beacon itself).
    The min_*/max_* attributes bound the covered diamond.
    """

    def __init__(self, pos, beacon) -> None:
        self.pos = pos
        self.beacon = Beacon(beacon, self)
        r = self.distance_to(self.beacon.pos)
        self.range = r
        self.min_x, self.max_x = self.x - r, self.x + r
        self.min_y, self.max_y = self.y - r, self.y + r

    def __str__(self) -> str:
        return f"S{self.pos}"

    def __repr__(self) -> str:
        return f"{self}"

    def __hash__(self) -> int:
        return hash(self.pos)

    @staticmethod
    def from_text(text):
        """Parse 'Sensor at x=2, y=18: closest beacon is at x=-2, y=15'."""
        words = text.split()
        sensor_pos = (int(words[2][2:-1]), int(words[3][2:-1]))
        beacon_pos = (int(words[-2][2:-1]), int(words[-1][2:]))
        return Sensor(sensor_pos, beacon_pos)

    @property
    def x(self):
        return self.pos[0]

    @property
    def y(self):
        return self.pos[1]

    def distance_to(self, pos):
        """Manhattan distance from this sensor to *pos*."""
        dx, dy = self.x - pos[0], self.y - pos[1]
        return abs(dx) + abs(dy)

    def covers(self, pos):
        """True if *pos* lies inside this sensor's exclusion diamond."""
        return self.distance_to(pos) <= self.range
class Grid():
    """Sparse sensor/beacon field keyed by ``(x, y)`` position tuples.

    ``sensors`` and ``beacons`` are dicts mapping positions to Sensor and
    Beacon objects; the grid's bounds are derived lazily from their extents.
    """

    def __init__(self, sensors, beacons) -> None:
        self.sensors = sensors    # {(x, y): Sensor}
        self.beacons = beacons    # {(x, y): Beacon}

    @property
    def min_x(self):
        bx = min([b.x for b in self.beacons.values()])
        sx = min([s.min_x for s in self.sensors.values()])
        return min(bx, sx)

    @property
    def max_x(self):
        bx = max([b.x for b in self.beacons.values()])
        sx = max([s.max_x for s in self.sensors.values()])
        return max(bx, sx)

    @property
    def min_y(self):
        by = min([b.y for b in self.beacons.values()])
        sy = min([s.min_y for s in self.sensors.values()])
        return min(by, sy)

    @property
    def max_y(self):
        by = max([b.y for b in self.beacons.values()])
        sy = max([s.max_y for s in self.sensors.values()])
        return max(by, sy)

    def __getitem__(self, idx):
        """Return row *idx* as ``[(x, Beacon|Sensor|None), ...]``.

        NOTE(review): ``range`` excludes ``max_x``, so the right-most
        column is never rendered; kept as-is to preserve original output.
        """
        row = []
        for x in range(self.min_x, self.max_x):
            pos = (x, idx)
            if pos in self.beacons:
                row.append((x, self.beacons[pos]))
            elif pos in self.sensors:
                row.append((x, self.sensors[pos]))
            else:
                row.append((x, None))
        return row

    def __iter__(self):
        # Bug fix: the original read a non-existent ``self.ys`` attribute,
        # so iterating a Grid raised AttributeError. Iterate rows from
        # min_y through max_y inclusive instead.
        self.__row = self.min_y
        return self

    def __next__(self):
        if self.__row <= self.max_y:
            row = self[self.__row]
            self.__row += 1
            return row
        raise StopIteration

    def is_covered(self, pos):
        """True if any sensor's exclusion diamond covers *pos*."""
        for s in self.sensors.values():
            if s.covers(pos):
                return True
        return False
# Parse every input line into a Sensor (which carries its Beacon) and
# index both by position for O(1) lookup in Grid.__getitem__.
beacons = {}
sensors = {}
for line in text:
    s = Sensor.from_text(line)
    beacons[s.beacon.pos] = s.beacon
    sensors[s.pos] = s
grid = Grid(sensors, beacons)
def print_row(grid, row_idx):
    """Render one grid row as text: B=beacon, S=sensor, #=covered, .=empty."""
    def glyph(x, cell):
        if isinstance(cell, Beacon):
            return 'B'
        if isinstance(cell, Sensor):
            return 'S'
        return '#' if grid.is_covered((x, row_idx)) else '.'

    return ''.join(glyph(x, cell) for x, cell in grid[row_idx])
def count_covered(prow):
    """Count the '#' (covered, beacon-free) cells in a rendered row string.

    Idiom fix: str.count replaces the manual counting loop.
    """
    return prow.count('#')
# Part 1: number of positions in row Y that cannot contain a beacon.
print("Part 1:", count_covered(print_row(grid, Y)))
def walk_perimeters(grid):
    """Locate the single uncovered cell inside the search box.

    The distress beacon must sit just outside some sensor's diamond, so
    only the ring at distance range+1 around each sensor is checked.
    Returns the tuning frequency x * 4000000 + y (None if no gap exists).
    """
    quadrants = ((-1, -1), (-1, 1), (1, -1), (1, 1))
    for sensor in grid.sensors.values():
        ring = sensor.range + 1                 # one step beyond coverage
        for dx in range(ring + 1):
            dy = ring - dx
            for sx, sy in quadrants:
                x = sensor.x + sx * dx
                y = sensor.y + sy * dy
                if 0 <= x <= MAX_X and 0 <= y <= MAX_Y and not grid.is_covered((x, y)):
                    return x * 4000000 + y
# Part 2: tuning frequency of the one uncovered position in the search box.
print("Part 2:", walk_perimeters(grid))
|
flexible
|
{
"blob_id": "f3a1a926feabcabc870f0a41ae239939c331d09d",
"index": 4106,
"step-1": "<mask token>\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n <mask token>\n\n def __repr__(self) ->str:\n return f'{self}'\n <mask token>\n <mask token>\n\n @property\n def y(self):\n return self.pos[1]\n <mask token>\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x 
in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) ->str:\n return f'B{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n @property\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + (self.y - y)\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n 
@property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) ->str:\n return f'B{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n @property\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + (self.y - y)\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n 
@property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\n<mask token>\n\n\ndef print_row(grid, row_idx):\n r = ''\n for x, v in grid[row_idx]:\n if isinstance(v, Beacon):\n r += 'B'\n elif isinstance(v, Sensor):\n r += 'S'\n elif grid.is_covered((x, row_idx)):\n r += '#'\n else:\n r += '.'\n return r\n\n\n<mask token>\n\n\ndef walk_perimeters(grid):\n for sensor in grid.sensors.values():\n for dx in range(sensor.range + 2):\n dy = sensor.range + 1 - dx\n for signx, signy in [(-1, -1), (-1, 1), (1, -1), (1, 1)]:\n x = sensor.x + dx * signx\n y = sensor.y + dy * signy\n if not (0 <= x <= MAX_X and 0 <= y <= MAX_Y):\n continue\n if not grid.is_covered((x, y)):\n return x * 4000000 + y\n\n\n<mask token>\n",
"step-4": "from pathlib import Path\nfile = Path(__file__).parent / 'input.txt'\nY = 2000000\nMAX_X = 4000000\nMIN_X = 0\nMAX_Y = 4000000\nMIN_Y = 0\ntext = file.read_text().splitlines()\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) ->str:\n return f'B{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n @property\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + (self.y - y)\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n 
@property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\nbeacons = {}\nsensors = {}\nfor line in text:\n s = Sensor.from_text(line)\n beacons[s.beacon.pos] = s.beacon\n sensors[s.pos] = s\ngrid = Grid(sensors, beacons)\n\n\ndef print_row(grid, row_idx):\n r = ''\n for x, v in grid[row_idx]:\n if isinstance(v, Beacon):\n r += 'B'\n elif isinstance(v, Sensor):\n r += 'S'\n elif grid.is_covered((x, row_idx)):\n r += '#'\n else:\n r += '.'\n return r\n\n\ndef count_covered(prow):\n count = 0\n for c in prow:\n if c == '#':\n count += 1\n return count\n\n\nprint('Part 1:', count_covered(print_row(grid, Y)))\n\n\ndef walk_perimeters(grid):\n for sensor in grid.sensors.values():\n for dx in range(sensor.range + 2):\n dy = sensor.range + 1 - dx\n for signx, signy in [(-1, -1), (-1, 1), (1, -1), (1, 1)]:\n x = sensor.x + dx * signx\n y = sensor.y + dy * signy\n if not (0 <= x <= MAX_X and 0 <= y <= MAX_Y):\n continue\n if not grid.is_covered((x, y)):\n return x * 4000000 + y\n\n\nprint('Part 2:', walk_perimeters(grid))\n",
"step-5": "from pathlib import Path\n\nfile = Path(__file__).parent / 'input.txt'\nY = 2000000\nMAX_X = 4000000\nMIN_X = 0\nMAX_Y = 4000000\nMIN_Y = 0\n\n# file = Path(__file__).parent / 'test_input.txt'\n# Y = 10\n# MAX_X = 20\n# MIN_X = 0\n# MAX_Y = 20\n# MIN_Y = 0\n\ntext = file.read_text().splitlines()\n\n\nclass Beacon():\n def __init__(self, pos, sensor) -> None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) -> str:\n return f\"B{self.pos}\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n\n def __hash__(self) -> int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n \n @property\n def y(self):\n return self.pos[1]\n \n @property\n def distance_to(self, pos):\n x,y = pos\n return abs(self.x - x) + (self.y - y)\n\nclass Sensor():\n def __init__(self, pos, beacon) -> None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) -> str:\n return f\"S{self.pos}\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n\n def __hash__(self) -> int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx,sy), (bx,by))\n\n @property\n def x(self):\n return self.pos[0]\n \n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x,y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n \n\nclass Grid():\n def __init__(self, sensors, beacons) -> None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n @property\n def 
max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = (x, idx)\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n\n return False\n\nbeacons = {}\nsensors = {}\n\nfor line in text:\n s = Sensor.from_text(line)\n beacons[s.beacon.pos] = s.beacon\n sensors[s.pos] = s\n\ngrid = Grid(sensors, beacons)\n\ndef print_row(grid, row_idx):\n r = \"\"\n for x,v in grid[row_idx]:\n if isinstance(v, Beacon):\n r += 'B'\n elif isinstance(v, Sensor):\n r += 'S'\n elif grid.is_covered((x,row_idx)):\n r += '#'\n else:\n r += '.'\n\n return r\n\ndef count_covered(prow):\n count = 0\n for c in prow:\n if c == '#':\n count += 1\n return count\n\nprint(\"Part 1:\", count_covered(print_row(grid, Y)))\n\ndef walk_perimeters(grid):\n for sensor in grid.sensors.values():\n # walk the perimeter and check if each adjacent position is \n # covered. 
If not, we have a winner\n for dx in range(sensor.range + 2):\n dy = (sensor.range + 1) - dx\n for signx, signy in [(-1,-1),(-1,1),(1,-1),(1,1)]:\n x = sensor.x + (dx * signx)\n y = sensor.y + (dy * signy)\n \n if not(0 <= x <= MAX_X and 0 <= y <= MAX_Y):\n continue\n \n if not grid.is_covered((x,y)):\n return x * 4000000 + y\n\n\nprint(\"Part 2:\", walk_perimeters(grid))",
"step-ids": [
24,
28,
30,
34,
35
]
}
|
[
24,
28,
30,
34,
35
] |
from fixate.reporting.csv import register_csv, unregister_csv
|
normal
|
{
"blob_id": "c70db0fc9d98657e318ecab7eb8af60cc2b19a2c",
"index": 4145,
"step-1": "<mask token>\n",
"step-2": "from fixate.reporting.csv import register_csv, unregister_csv\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""Some random mathematical helper functions.
"""
from __future__ import division, print_function

import math
from fractions import Fraction
# STATISTICS
def mean(L):
    """Return the arithmetic mean of the values in L."""
    total = sum(L)
    count = len(L)
    return total / count
def variance(L, is_sample=0):
    """Variance of the values in L.

    With is_sample=0 this is the population variance; is_sample=1 gives
    the unbiased sample variance (denominator len(L) - 1).
    """
    center = sum(L) / len(L)
    return sum((v - center) ** 2 for v in L) / (len(L) - is_sample)
def std_dev(L, is_sample=0):
    """Standard deviation (square root of the variance) of the values in L."""
    var = variance(L, is_sample)
    return math.sqrt(var)
def z_score(num, mean, std_dev):
    """How many standard deviations ``num`` lies from ``mean``."""
    deviation = num - mean
    return deviation / std_dev
# COMBINATORICS
def fac(n):
    """Return n! for a non-negative integer n."""
    assert n >= 0
    # Base case must be 1 so that fac(0) == 1: the original returned n
    # itself for n <= 2, making fac(0) == 0 and breaking every caller
    # that divides by fac(0) (e.g. over(n, n)).
    return 1 if n <= 1 else fac(n - 1) * n
def over(n, k):
    """Return the binomial coefficient "n over k", i.e. C(n, k).

    The original divided only by (n-k)!, which yields the falling
    factorial P(n, k) instead of the binomial coefficient the name and
    docstring promise; the missing k! divisor is restored here.
    Implemented with math.factorial so the fix stands on its own.
    """
    return math.factorial(n) // (math.factorial(n - k) * math.factorial(k))
def coin(coins, heads):
    """Probability for given number of heads (or tails) when throwing given
    number of fair coins.

    Returns an exact ``fractions.Fraction``: C(coins, heads) / 2**coins.
    """
    # The original referenced undefined names ``c``/``n`` (NameError at
    # call time); use the actual parameters instead.
    favorable = math.factorial(coins) // (math.factorial(coins - heads) *
                                          math.factorial(heads))
    return Fraction(favorable, 2 ** coins)
def pick_grom_group(group, other, selected):
    """When selecting 'selected' number of individuums from 'group' and 'other',
    return probability that all are from 'group'.

    Returns an exact ``fractions.Fraction``:
    C(group, selected) / C(group + other, selected).
    """
    def _comb(n, k):
        # Binomial coefficient; self-contained so this fix does not
        # depend on sibling helpers.
        return math.factorial(n) // (math.factorial(n - k) * math.factorial(k))
    # The original misspelled ``Fraction`` as ``Faction`` (a NameError at
    # call time) and the module never imported it.
    return Fraction(_comb(group, selected), _comb(group + other, selected))
def unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):
    """Probability of flipping ``heads_needed`` heads in a row with a coin
    drawn at random from a bag of fair and unfair coins.

    ``percent_unfair`` is each unfair coin's heads probability in percent.
    """
    fair_share = (num_coins - num_unfair) / num_coins
    unfair_share = num_unfair / num_coins
    # Law of total probability over which kind of coin was drawn.
    return (fair_share * 0.5 ** heads_needed
            + unfair_share * (percent_unfair / 100) ** heads_needed)
# GEOMETRY
def herons_formula(a, b, c):
    """Area of the triangle with side lengths a, b and c (Heron's formula).

    Also prints the formula being applied, matching the original behavior.
    """
    print("sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2")
    semi = (a + b + c) / 2
    return math.sqrt(semi * (semi - a) * (semi - b) * (semi - c))
def area_equilat(side):
    """Area of an equilateral triangle with the given side length."""
    half = side / 2
    return half * math.sqrt(side ** 2 - half ** 2)
# LINEAR ALGEBRA
def inv(a, b, c, d):
    """Inverse of the 2x2 matrix [[a, b], [c, d]].

    Returns the four entries of the inverse in row-major order, each as
    the string form of an exact fraction.

    Raises:
        ZeroDivisionError: if the matrix is singular (det == 0).
    """
    det = a * d - b * c
    # The module never imported ``fractions`` (NameError in the original).
    # Return a concrete list rather than a one-shot lazy map so the result
    # can be indexed and iterated more than once.
    return [str(Fraction(entry, det)) for entry in (d, -b, -c, a)]
def det2(m):
    """Determinant of the 2x2 matrix m (a pair of length-2 rows)."""
    top, bottom = m
    return top[0] * bottom[1] - top[1] * bottom[0]
def det3(m):
    """Determinant of the 3x3 matrix m, by cofactor expansion along row 0."""
    a, b, c = m[0]
    # The three 2x2 minors, written inline so each cofactor is visible at
    # a glance.
    minor_a = m[1][1] * m[2][2] - m[1][2] * m[2][1]
    minor_b = m[1][0] * m[2][2] - m[1][2] * m[2][0]
    minor_c = m[1][0] * m[2][1] - m[1][1] * m[2][0]
    return a * minor_a - b * minor_b + c * minor_c
# SOME COMPLEX FORMULAS I NEVER CAN QUITE REMEMBER
def series(r, n):
    """Sum of the geometric series 1 + r + r**2 + ... + r**(n-1)."""
    numerator = 1 - r ** n
    denominator = 1 - r
    return numerator / denominator
def quad_form(a, b, c):
    """Quadratic formula: the two x for which a*x**2 + b*x + c == 0.

    Returns a tuple with the '-' root first, then the '+' root.
    """
    discriminant_root = math.sqrt(b ** 2 - 4 * a * c)
    denom = 2 * a
    return (-b - discriminant_root) / denom, (-b + discriminant_root) / denom
def master_method(a, b, d):
    """Print the asymptotic running time given by the master method for a
    recurrence T(n) = a*T(n/b) + O(n^d)."""
    # Compare a against b^d once; the three cases partition the outcomes.
    threshold = b ** d
    if a == threshold:
        print("Case 1: a = b^d")
        print("-> O(n^%d log n)" % d)
    elif a < threshold:
        print("Case 2: a < b^d")
        print("-> O(n^%d)" % d)
    elif a > threshold:
        print("Case 3: a > b^d")
        print("-> O(n^log%d(%d))" % (b, a))
        print(" = O(n^%.2f)" % math.log(a, b))
|
normal
|
{
"blob_id": "34acb6da1dc9403a311ce3bca0a828a77b7b36da",
"index": 7403,
"step-1": "<mask token>\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\n<mask token>\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\n<mask token>\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\n<mask token>\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return 
x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n",
"step-3": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\n<mask token>\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\ndef det3(m):\n \"\"\"Determinant of 3x3 matrix.\"\"\"\n a, b, c = m[0]\n da = det2([m[1][1:], m[2][1:]])\n db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]])\n dc = det2([m[1][:2], m[2][:2]])\n return a * da - b * db + c * dc\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / 
(1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n",
"step-4": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\ndef unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):\n \"\"\"Calculate probability for pulling a coin from a bag with fair and unfair\n\tcoins and flipping it a number of times, each time coming up heads.\"\"\"\n part_fair = (num_coins - num_unfair) / num_coins\n part_unfair = num_unfair / num_coins\n prob_fair = 0.5 ** heads_needed\n prob_unfair = (percent_unfair / 100) ** heads_needed\n return part_fair * prob_fair + part_unfair * prob_unfair\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - 
(side / 2) ** 2)\n\n\ndef inv(a, b, c, d):\n \"\"\"Inverse of 2x2 matrix.\"\"\"\n det = a * d - b * c\n m = lambda x: fractions.Fraction(x, det)\n return map(str, map(m, [d, -b, -c, a]))\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\ndef det3(m):\n \"\"\"Determinant of 3x3 matrix.\"\"\"\n a, b, c = m[0]\n da = det2([m[1][1:], m[2][1:]])\n db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]])\n dc = det2([m[1][:2], m[2][:2]])\n return a * da - b * db + c * dc\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n",
"step-5": "\"\"\"Some random mathematical helper functions.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport math\n\n\n# STATISTICS\n\ndef mean(L):\n\t\"\"\"Calculate mean of given List\"\"\"\n\treturn sum(L) / len(L)\n\t\ndef variance(L, is_sample=0):\n\t\"\"\"calculate variance (or sample variance) of given List\"\"\"\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)\n\t\ndef std_dev(L, is_sample=0):\n\t\"\"\"calculate standard deviation of given List\"\"\"\n\treturn math.sqrt(variance(L, is_sample))\n\ndef z_score(num, mean, std_dev):\n\t\"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n\treturn (num - mean) / std_dev\n\n\n# COMBINATORICS\n\ndef fac(n):\n\tassert n >= 0\n\treturn n if n <= 2 else fac(n - 1) * n\n\ndef over(n, k):\n\t\"\"\"n over k\"\"\"\n\treturn fac(n) // fac(n-k)\n\ndef coin(coins, heads):\n\t\"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n\treturn Fraction(int(fac(c) / fac(c-n) / fac(n)), 2**c)\n\ndef pick_grom_group(group, other, selected):\n\t\"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n\treturn Faction(over(group, selected), over(group + other, selected))\n\ndef unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):\n\t\"\"\"Calculate probability for pulling a coin from a bag with fair and unfair\n\tcoins and flipping it a number of times, each time coming up heads.\"\"\"\n\tpart_fair = (num_coins - num_unfair) / num_coins\n\tpart_unfair = num_unfair / num_coins\n\tprob_fair = 0.5**heads_needed\n\tprob_unfair = (percent_unfair / 100)**heads_needed\n\treturn part_fair * prob_fair + part_unfair * prob_unfair\n\n\n# GEOMETRY\n\ndef herons_formula(a, b, c):\n\t\"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n\tprint(\"sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2\")\n\ts = (a + b + c) / 2\n\treturn 
math.sqrt(s * (s-a) * (s-b) * (s-c))\n\t\ndef area_equilat(side):\n\t\"\"\"Area of equilateral triangle.\"\"\"\n\treturn side/2 * math.sqrt(side**2 - (side/2)**2)\n\n\n# LINEAR ALGEBRA\n\ndef inv(a,b,c,d):\n\t\"\"\"Inverse of 2x2 matrix.\"\"\"\n\tdet = a*d-b*c\n\tm = lambda x: fractions.Fraction(x, det)\n\treturn map(str, map(m, [d, -b, -c, a]))\n\ndef det2(m):\n\t\"\"\"Determinant of 2x2 matrix.\"\"\"\n\t(a,b), (c,d) = m\n\treturn a*d - b*c\n\ndef det3(m):\n\t\"\"\"Determinant of 3x3 matrix.\"\"\"\n\ta, b, c = m[0]\n\tda = det2([ m[1][1:] , m[2][1:]])\n\tdb = det2([[m[1][0],m[1][2]],[m[2][0],m[2][2]]])\n\tdc = det2([ m[1][:2] , m[2][:2]])\n\treturn a*da - b*db + c*dc\n\n\n# SOME COMPLEX FORMULAS I NEVER CAN QUITE REMEMBER\n\ndef series(r, n):\n\t\"\"\"Calculate geometric series.\"\"\"\n\treturn (1 - r**n) / (1 - r)\n\ndef quad_form(a, b, c):\n\t\"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n\tsq = math.sqrt(b**2 - 4 * a * c)\n\tx1 = (-b - sq) / (2 * a)\n\tx2 = (-b + sq) / (2 * a)\n\treturn (x1, x2)\n\ndef master_method(a, b, d):\n\t\"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n\tif a == b**d:\n\t\tprint(\"Case 1: a = b^d\")\n\t\tprint(\"-> O(n^%d log n)\" % d)\n\telif a < b**d:\n\t\tprint(\"Case 2: a < b^d\")\n\t\tprint(\"-> O(n^%d)\" % d)\n\telif a > b**d:\n\t\tprint(\"Case 3: a > b^d\")\n\t\tprint(\"-> O(n^log%d(%d))\" % (b, a))\n\t\tprint(\" = O(n^%.2f)\" % math.log(a, b))\n\n",
"step-ids": [
7,
14,
15,
17,
19
]
}
|
[
7,
14,
15,
17,
19
] |
"""
LeetCode Problem: 242. Valid Anagram
Link: https://leetcode.com/problems/valid-anagram/
Written by: Mostofa Adib Shakib
Language: Python
"""
class Solution(object):
    def isAnagram(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool

        Two strings are anagrams iff they contain exactly the same
        characters with the same multiplicities, i.e. their sorted forms
        are equal.
        """
        if len(s) != len(t):
            # Strings of different length can never be anagrams.
            return False
        # Idiomatic replacement for the original sort-then-index-loop:
        # sorted() already yields comparable canonical forms.
        return sorted(s) == sorted(t)
|
normal
|
{
"blob_id": "a4f932a8566afe0265dc1057d0f6534a608697f7",
"index": 365,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n length1 = len(s)\n length2 = len(t)\n if length1 != length2:\n return False\n s = sorted(s)\n t = sorted(t)\n for i in range(0, length1):\n if s[i] != t[i]:\n return False\n return True\n",
"step-4": "\"\"\"\nLeetCode Problem: 242. Valid Anagram\nLink: https://leetcode.com/problems/valid-anagram/\nWritten by: Mostofa Adib Shakib\nLanguage: Python\n\"\"\"\n\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n \n length1 = len(s)\n length2 = len(t)\n \n if length1 != length2:\n return False\n \n s = sorted(s) #sorted the string in alphanumeric order\n t = sorted(t) #sorted the string in alphanumeric order\n \n for i in range(0, length1):\n if s[i] != t[i]:\n return False # return false if the two sorted strings are not the same.\n\n return True # if the sorted strings are same return True",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SADQ_GQF(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def should_explore(self):
self.epsilon = self.epsilon_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=
self.epsilon, global_step=self.steps)
return random.random() < self.epsilon
    def predict(self, state, isGreedy=False, is_random=False):
        """Pick one candidate next-state from ``state`` (one row per
        candidate), either uniformly at random (epsilon-greedy exploration)
        or by the highest Q-value from the eval model.

        Also performs the per-step bookkeeping of a DQN-style agent:
        records the pending transition into replay memory, periodically
        syncs the target network, and periodically runs a training update.

        Args:
            state: 2-D array-like of candidate states; the chosen row
                becomes the new ``self.previous_state``.
            isGreedy: when True, never explore (always use the model).
            is_random: unused in this body -- presumably kept for
                interface compatibility; TODO confirm with callers.

        Returns:
            (choice, fv): the selected index and the chosen candidate's
            feature vector (``fv`` is None on the random path).
            NOTE(review): ``choice`` is a plain int on the random path but
            a 0-dim tensor on the greedy path -- callers must handle both.
        """
        if self.learning:
            self.steps += 1
        # Flush the transition started by the previous call: previous
        # state, the reward accumulated since then, and the (deduplicated)
        # set of current candidates as the successor; 0 marks non-terminal.
        if (self.previous_state is not None and self.learning and self.
            current_reward is not None):
            state_crr = np.unique(state, axis=0)
            self.memory.add(self.previous_state, None, self.current_reward,
                state_crr.reshape(-1, self.state_length), 0, self.features)
        if self.learning and self.should_explore() and not isGreedy:
            # Exploration: uniform choice among the candidate rows; no
            # Q-values or feature vector are produced on this path.
            q_values = None
            fv = None
            choice = random.choice(list(range(len(state))))
            action = choice
        else:
            # Exploitation: score every candidate in one batch and take
            # the argmax Q. no_grad() skips autograd graph construction
            # for inference.
            with torch.no_grad():
                features_vector, q_values = self.eval_model.predict_batch(
                    Tensor(state))
                q_values = FloatTensor(q_values).view(-1)
                _, choice = q_values.max(0)
                action = choice
                fv = features_vector[choice]
        # Periodically sync the target network with the eval network:
        # hard copy normally, soft update when replace_frequency == 1.
        if (self.learning and self.steps % self.reinforce_config.
            replace_frequency == 0):
            logger.debug('Replacing target model for %s' % self.name)
            if self.reinforce_config.replace_frequency != 1:
                self.target_model.replace(self.eval_model)
            else:
                self.target_model.replace_soft(self.eval_model)
        # Train from replay memory every update_steps steps once past the
        # warm-up threshold; update_time accumulates wall-clock spent here.
        if (self.learning and self.steps > self.reinforce_config.
            update_start and self.steps % self.reinforce_config.
            update_steps == 0):
            self.update_time -= time.time()
            self.update()
            self.update_time += time.time()
        # Start accumulating reward for the transition that begins now.
        self.current_reward = 0
        self.previous_state = state[action]
        return choice, fv
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def end_episode(self, state):
if not self.learning:
return
episode_time = time.time() - self.episode_time
self.reward_history.append(self.total_reward)
self.episode_time_history.append(episode_time)
total_time = sum(self.episode_time_history)
avg_time = total_time / len(self.episode_time_history)
logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %
(self.episode + 1, self.total_reward, self.epsilon))
logger.debug(
'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'
% (episode_time, avg_time, self.prediction_time, self.update_time)
)
self.episode += 1
self.summary.add_scalar(tag='%s/Episode Reward' % self.name,
scalar_value=self.total_reward, global_step=self.episode)
self.memory.add(self.previous_state, None, self.current_reward,
state.reshape(-1, self.state_length), 1, self.features)
self.save()
self.reset()
def reset(self):
self.episode_time = time.time()
self.current_reward = 0
self.total_reward = 0
self.previous_state = None
self.previous_action = None
self.prediction_time = 0
self.update_time = 0
self.features = None
def restore_state(self):
restore_path = self.network_config.network_path + '/adaptive.info'
if self.network_config.network_path and os.path.exists(restore_path
) and self.memory_resotre:
logger.info('Restoring state from %s' % self.network_config.
network_path)
with open(restore_path, 'rb') as file:
info = pickle.load(file)
self.steps = info['steps']
self.episode = info['episode']
self.memory.load(self.network_config.network_path)
print('lenght of memeory: ', len(self.memory))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def passFeatures(self, features):
self.features = features.copy()
return
def summary_test(self, reward, epoch):
self.summary.add_scalar(tag='%s/eval reward' % self.name,
scalar_value=reward, global_step=epoch * 40)
<|reserved_special_token_0|>
def update(self):
if len(self.memory._storage) <= self.reinforce_config.batch_size:
return
beta = self.beta_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=
beta, global_step=self.steps)
if self.reinforce_config.use_prior_memory:
batch = self.memory.sample(self.reinforce_config.batch_size, beta)
(states, actions, reward, next_states, is_terminal, weights,
batch_idxes) = batch
self.summary.add_histogram(tag='%s/Batch Indices' % self.name,
values=Tensor(batch_idxes), global_step=self.steps)
else:
batch = self.memory.sample(self.reinforce_config.batch_size)
(states, actions, reward, next_states, is_terminal, features_vector
) = batch
states = FloatTensor(states)
terminal = FloatTensor([(1 if t else 0) for t in is_terminal])
reward = FloatTensor(reward)
features_vector = FloatTensor(features_vector)
batch_index = torch.arange(self.reinforce_config.batch_size, dtype=
torch.long)
feature_values, q_values = self.eval_model.predict_batch(states)
q_values = q_values.flatten()
q_max = []
f_max = []
for i, ns in enumerate(next_states):
feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns
).view(-1, self.state_length))
q_value_max, idx = q_n.max(0)
features_max = feature_n[idx]
q_max.append(q_value_max)
if self.network_config.version in ['v10', 'v11']:
features_max[:, :3] = features_max[:, :3] * ns[idx, 65
] / states[i, 65]
features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66
] / states[i, 66]
features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63
] / states[i, 63]
features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64
] / states[i, 64]
features_max[features_max == float('inf')] = 0
f_max.append(features_max.view(-1))
q_max = torch.stack(q_max, dim=1).view(-1)
f_max = torch.stack(f_max)
q_max = (1 - terminal) * q_max
f_max = (1 - terminal.view(-1, 1)) * f_max
q_target = reward + self.reinforce_config.discount_factor * q_max
f_target = (features_vector + self.reinforce_config.discount_factor *
f_max)
if (torch.sum(feature_values != feature_values).item() + torch.sum(
f_target != f_target)).item() > 0:
f_target[f_target != f_target] = 0
self.eval_model.fit(q_values, q_target, feature_values, f_target)
if self.reinforce_config.use_prior_memory:
td_errors = q_values - q_target
new_priorities = torch.abs(td_errors) + 1e-06
self.memory.update_priorities(batch_idxes, new_priorities.data)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def load_model(self, model):
self.eval_model.replace(model)
def load_weight(self, new_feature_weights, new_q_weights):
self.eval_model.feautre_model.load_state_dict(new_feature_weights)
self.eval_model.q_model.load_state_dict(new_q_weights)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SADQ_GQF(object):
<|reserved_special_token_0|>
def __init__(self, name, state_length, network_config, reinforce_config,
feature_len, combine_decomposed_func, is_sigmoid=False,
memory_resotre=True):
super(SADQ_GQF, self).__init__()
self.name = name
self.network_config = network_config
self.reinforce_config = reinforce_config
self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)
self.learning = True
self.explanation = False
self.state_length = state_length
self.features = 0
self.feature_len = feature_len
self.steps = 0
self.reward_history = []
self.episode_time_history = []
self.best_reward_mean = -maxsize
self.episode = 0
self.feature_len = feature_len
self.features = None
self.reset()
self.memory_resotre = memory_resotre
reinforce_summary_path = (self.reinforce_config.summaries_path +
'/' + self.name)
if not self.network_config.restore_network:
clear_summary_path(reinforce_summary_path)
else:
self.restore_state()
self.summary = SummaryWriter(log_dir=reinforce_summary_path)
self.eval_model = feature_q_model(name, state_length, self.
feature_len, self.network_config.output_shape, network_config)
self.target_model = feature_q_model(name, state_length, self.
feature_len, self.network_config.output_shape, network_config)
self.beta_schedule = LinearSchedule(self.reinforce_config.
beta_timesteps, initial_p=self.reinforce_config.beta_initial,
final_p=self.reinforce_config.beta_final)
self.epsilon_schedule = LinearSchedule(self.reinforce_config.
epsilon_timesteps, initial_p=self.reinforce_config.
starting_epsilon, final_p=self.reinforce_config.final_epsilon)
def should_explore(self):
self.epsilon = self.epsilon_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=
self.epsilon, global_step=self.steps)
return random.random() < self.epsilon
def predict(self, state, isGreedy=False, is_random=False):
if self.learning:
self.steps += 1
if (self.previous_state is not None and self.learning and self.
current_reward is not None):
state_crr = np.unique(state, axis=0)
self.memory.add(self.previous_state, None, self.current_reward,
state_crr.reshape(-1, self.state_length), 0, self.features)
if self.learning and self.should_explore() and not isGreedy:
q_values = None
fv = None
choice = random.choice(list(range(len(state))))
action = choice
else:
with torch.no_grad():
features_vector, q_values = self.eval_model.predict_batch(
Tensor(state))
q_values = FloatTensor(q_values).view(-1)
_, choice = q_values.max(0)
action = choice
fv = features_vector[choice]
if (self.learning and self.steps % self.reinforce_config.
replace_frequency == 0):
logger.debug('Replacing target model for %s' % self.name)
if self.reinforce_config.replace_frequency != 1:
self.target_model.replace(self.eval_model)
else:
self.target_model.replace_soft(self.eval_model)
if (self.learning and self.steps > self.reinforce_config.
update_start and self.steps % self.reinforce_config.
update_steps == 0):
self.update_time -= time.time()
self.update()
self.update_time += time.time()
self.current_reward = 0
self.previous_state = state[action]
return choice, fv
def disable_learning(self, is_save=False):
logger.info('Disabled Learning for %s agent' % self.name)
if is_save:
self.save(force=True)
self.learning = False
self.episode = 0
def enable_learning(self):
logger.info('enabled Learning for %s agent' % self.name)
self.learning = True
self.reset()
def end_episode(self, state):
if not self.learning:
return
episode_time = time.time() - self.episode_time
self.reward_history.append(self.total_reward)
self.episode_time_history.append(episode_time)
total_time = sum(self.episode_time_history)
avg_time = total_time / len(self.episode_time_history)
logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %
(self.episode + 1, self.total_reward, self.epsilon))
logger.debug(
'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'
% (episode_time, avg_time, self.prediction_time, self.update_time)
)
self.episode += 1
self.summary.add_scalar(tag='%s/Episode Reward' % self.name,
scalar_value=self.total_reward, global_step=self.episode)
self.memory.add(self.previous_state, None, self.current_reward,
state.reshape(-1, self.state_length), 1, self.features)
self.save()
self.reset()
def reset(self):
self.episode_time = time.time()
self.current_reward = 0
self.total_reward = 0
self.previous_state = None
self.previous_action = None
self.prediction_time = 0
self.update_time = 0
self.features = None
def restore_state(self):
restore_path = self.network_config.network_path + '/adaptive.info'
if self.network_config.network_path and os.path.exists(restore_path
) and self.memory_resotre:
logger.info('Restoring state from %s' % self.network_config.
network_path)
with open(restore_path, 'rb') as file:
info = pickle.load(file)
self.steps = info['steps']
self.episode = info['episode']
self.memory.load(self.network_config.network_path)
print('lenght of memeory: ', len(self.memory))
<|reserved_special_token_0|>
def reward(self, r):
self.total_reward += r
self.current_reward += r
def passFeatures(self, features):
self.features = features.copy()
return
def summary_test(self, reward, epoch):
self.summary.add_scalar(tag='%s/eval reward' % self.name,
scalar_value=reward, global_step=epoch * 40)
<|reserved_special_token_0|>
def update(self):
if len(self.memory._storage) <= self.reinforce_config.batch_size:
return
beta = self.beta_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=
beta, global_step=self.steps)
if self.reinforce_config.use_prior_memory:
batch = self.memory.sample(self.reinforce_config.batch_size, beta)
(states, actions, reward, next_states, is_terminal, weights,
batch_idxes) = batch
self.summary.add_histogram(tag='%s/Batch Indices' % self.name,
values=Tensor(batch_idxes), global_step=self.steps)
else:
batch = self.memory.sample(self.reinforce_config.batch_size)
(states, actions, reward, next_states, is_terminal, features_vector
) = batch
states = FloatTensor(states)
terminal = FloatTensor([(1 if t else 0) for t in is_terminal])
reward = FloatTensor(reward)
features_vector = FloatTensor(features_vector)
batch_index = torch.arange(self.reinforce_config.batch_size, dtype=
torch.long)
feature_values, q_values = self.eval_model.predict_batch(states)
q_values = q_values.flatten()
q_max = []
f_max = []
for i, ns in enumerate(next_states):
feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns
).view(-1, self.state_length))
q_value_max, idx = q_n.max(0)
features_max = feature_n[idx]
q_max.append(q_value_max)
if self.network_config.version in ['v10', 'v11']:
features_max[:, :3] = features_max[:, :3] * ns[idx, 65
] / states[i, 65]
features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66
] / states[i, 66]
features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63
] / states[i, 63]
features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64
] / states[i, 64]
features_max[features_max == float('inf')] = 0
f_max.append(features_max.view(-1))
q_max = torch.stack(q_max, dim=1).view(-1)
f_max = torch.stack(f_max)
q_max = (1 - terminal) * q_max
f_max = (1 - terminal.view(-1, 1)) * f_max
q_target = reward + self.reinforce_config.discount_factor * q_max
f_target = (features_vector + self.reinforce_config.discount_factor *
f_max)
if (torch.sum(feature_values != feature_values).item() + torch.sum(
f_target != f_target)).item() > 0:
f_target[f_target != f_target] = 0
self.eval_model.fit(q_values, q_target, feature_values, f_target)
if self.reinforce_config.use_prior_memory:
td_errors = q_values - q_target
new_priorities = torch.abs(td_errors) + 1e-06
self.memory.update_priorities(batch_idxes, new_priorities.data)
def load_model(self, model):
self.eval_model.replace(model)
<|reserved_special_token_0|>
def load_model(self, model):
self.eval_model.replace(model)
def load_weight(self, new_feature_weights, new_q_weights):
self.eval_model.feautre_model.load_state_dict(new_feature_weights)
self.eval_model.q_model.load_state_dict(new_q_weights)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SADQ_GQF(object):
<|reserved_special_token_0|>
def __init__(self, name, state_length, network_config, reinforce_config,
feature_len, combine_decomposed_func, is_sigmoid=False,
memory_resotre=True):
super(SADQ_GQF, self).__init__()
self.name = name
self.network_config = network_config
self.reinforce_config = reinforce_config
self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)
self.learning = True
self.explanation = False
self.state_length = state_length
self.features = 0
self.feature_len = feature_len
self.steps = 0
self.reward_history = []
self.episode_time_history = []
self.best_reward_mean = -maxsize
self.episode = 0
self.feature_len = feature_len
self.features = None
self.reset()
self.memory_resotre = memory_resotre
reinforce_summary_path = (self.reinforce_config.summaries_path +
'/' + self.name)
if not self.network_config.restore_network:
clear_summary_path(reinforce_summary_path)
else:
self.restore_state()
self.summary = SummaryWriter(log_dir=reinforce_summary_path)
self.eval_model = feature_q_model(name, state_length, self.
feature_len, self.network_config.output_shape, network_config)
self.target_model = feature_q_model(name, state_length, self.
feature_len, self.network_config.output_shape, network_config)
self.beta_schedule = LinearSchedule(self.reinforce_config.
beta_timesteps, initial_p=self.reinforce_config.beta_initial,
final_p=self.reinforce_config.beta_final)
self.epsilon_schedule = LinearSchedule(self.reinforce_config.
epsilon_timesteps, initial_p=self.reinforce_config.
starting_epsilon, final_p=self.reinforce_config.final_epsilon)
def should_explore(self):
self.epsilon = self.epsilon_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=
self.epsilon, global_step=self.steps)
return random.random() < self.epsilon
def predict(self, state, isGreedy=False, is_random=False):
if self.learning:
self.steps += 1
if (self.previous_state is not None and self.learning and self.
current_reward is not None):
state_crr = np.unique(state, axis=0)
self.memory.add(self.previous_state, None, self.current_reward,
state_crr.reshape(-1, self.state_length), 0, self.features)
if self.learning and self.should_explore() and not isGreedy:
q_values = None
fv = None
choice = random.choice(list(range(len(state))))
action = choice
else:
with torch.no_grad():
features_vector, q_values = self.eval_model.predict_batch(
Tensor(state))
q_values = FloatTensor(q_values).view(-1)
_, choice = q_values.max(0)
action = choice
fv = features_vector[choice]
if (self.learning and self.steps % self.reinforce_config.
replace_frequency == 0):
logger.debug('Replacing target model for %s' % self.name)
if self.reinforce_config.replace_frequency != 1:
self.target_model.replace(self.eval_model)
else:
self.target_model.replace_soft(self.eval_model)
if (self.learning and self.steps > self.reinforce_config.
update_start and self.steps % self.reinforce_config.
update_steps == 0):
self.update_time -= time.time()
self.update()
self.update_time += time.time()
self.current_reward = 0
self.previous_state = state[action]
return choice, fv
def disable_learning(self, is_save=False):
logger.info('Disabled Learning for %s agent' % self.name)
if is_save:
self.save(force=True)
self.learning = False
self.episode = 0
def enable_learning(self):
logger.info('enabled Learning for %s agent' % self.name)
self.learning = True
self.reset()
def end_episode(self, state):
if not self.learning:
return
episode_time = time.time() - self.episode_time
self.reward_history.append(self.total_reward)
self.episode_time_history.append(episode_time)
total_time = sum(self.episode_time_history)
avg_time = total_time / len(self.episode_time_history)
logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %
(self.episode + 1, self.total_reward, self.epsilon))
logger.debug(
'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'
% (episode_time, avg_time, self.prediction_time, self.update_time)
)
self.episode += 1
self.summary.add_scalar(tag='%s/Episode Reward' % self.name,
scalar_value=self.total_reward, global_step=self.episode)
self.memory.add(self.previous_state, None, self.current_reward,
state.reshape(-1, self.state_length), 1, self.features)
self.save()
self.reset()
def reset(self):
self.episode_time = time.time()
self.current_reward = 0
self.total_reward = 0
self.previous_state = None
self.previous_action = None
self.prediction_time = 0
self.update_time = 0
self.features = None
def restore_state(self):
restore_path = self.network_config.network_path + '/adaptive.info'
if self.network_config.network_path and os.path.exists(restore_path
) and self.memory_resotre:
logger.info('Restoring state from %s' % self.network_config.
network_path)
with open(restore_path, 'rb') as file:
info = pickle.load(file)
self.steps = info['steps']
self.episode = info['episode']
self.memory.load(self.network_config.network_path)
print('lenght of memeory: ', len(self.memory))
def save(self, force=False, appendix=''):
info = {'steps': self.steps, 'best_reward_mean': self.
best_reward_mean, 'episode': self.episode}
if (len(self.reward_history) >= self.network_config.save_steps and
self.episode % self.network_config.save_steps == 0 or force):
total_reward = sum(self.reward_history[-self.network_config.
save_steps:])
current_reward_mean = total_reward / self.network_config.save_steps
if force:
print('*************saved*****************',
current_reward_mean, self.best_reward_mean)
if not force:
self.best_reward_mean = current_reward_mean
logger.info('Saving network. Found new best reward (%.2f)' %
total_reward)
self.eval_model.save_network(appendix=appendix)
self.target_model.save_network(appendix=appendix)
with open(self.network_config.network_path +
'/adaptive.info', 'wb') as file:
pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)
self.memory.save(self.network_config.network_path)
print('lenght of memeory: ', len(self.memory))
else:
logger.info('The best reward is still %.2f. Not saving' %
self.best_reward_mean)
def reward(self, r):
self.total_reward += r
self.current_reward += r
def passFeatures(self, features):
self.features = features.copy()
return
def summary_test(self, reward, epoch):
self.summary.add_scalar(tag='%s/eval reward' % self.name,
scalar_value=reward, global_step=epoch * 40)
def summary_GVFs_loss(self, loss, epoch):
self.summary.add_scalar(tag='%s/GVFs loss' % self.name,
scalar_value=loss, global_step=epoch * 40)
def update(self):
if len(self.memory._storage) <= self.reinforce_config.batch_size:
return
beta = self.beta_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=
beta, global_step=self.steps)
if self.reinforce_config.use_prior_memory:
batch = self.memory.sample(self.reinforce_config.batch_size, beta)
(states, actions, reward, next_states, is_terminal, weights,
batch_idxes) = batch
self.summary.add_histogram(tag='%s/Batch Indices' % self.name,
values=Tensor(batch_idxes), global_step=self.steps)
else:
batch = self.memory.sample(self.reinforce_config.batch_size)
(states, actions, reward, next_states, is_terminal, features_vector
) = batch
states = FloatTensor(states)
terminal = FloatTensor([(1 if t else 0) for t in is_terminal])
reward = FloatTensor(reward)
features_vector = FloatTensor(features_vector)
batch_index = torch.arange(self.reinforce_config.batch_size, dtype=
torch.long)
feature_values, q_values = self.eval_model.predict_batch(states)
q_values = q_values.flatten()
q_max = []
f_max = []
for i, ns in enumerate(next_states):
feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns
).view(-1, self.state_length))
q_value_max, idx = q_n.max(0)
features_max = feature_n[idx]
q_max.append(q_value_max)
if self.network_config.version in ['v10', 'v11']:
features_max[:, :3] = features_max[:, :3] * ns[idx, 65
] / states[i, 65]
features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66
] / states[i, 66]
features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63
] / states[i, 63]
features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64
] / states[i, 64]
features_max[features_max == float('inf')] = 0
f_max.append(features_max.view(-1))
q_max = torch.stack(q_max, dim=1).view(-1)
f_max = torch.stack(f_max)
q_max = (1 - terminal) * q_max
f_max = (1 - terminal.view(-1, 1)) * f_max
q_target = reward + self.reinforce_config.discount_factor * q_max
f_target = (features_vector + self.reinforce_config.discount_factor *
f_max)
if (torch.sum(feature_values != feature_values).item() + torch.sum(
f_target != f_target)).item() > 0:
f_target[f_target != f_target] = 0
self.eval_model.fit(q_values, q_target, feature_values, f_target)
if self.reinforce_config.use_prior_memory:
td_errors = q_values - q_target
new_priorities = torch.abs(td_errors) + 1e-06
self.memory.update_priorities(batch_idxes, new_priorities.data)
def load_model(self, model):
self.eval_model.replace(model)
def load_weight(self, weight_dict):
self.eval_model.load_weight(weight_dict)
def load_model(self, model):
self.eval_model.replace(model)
def load_weight(self, new_feature_weights, new_q_weights):
self.eval_model.feautre_model.load_state_dict(new_feature_weights)
self.eval_model.q_model.load_state_dict(new_q_weights)
<|reserved_special_token_1|>
import logging
import time
import random
import pickle
import os
from sys import maxsize
import torch
from tensorboardX import SummaryWriter
from baselines.common.schedules import LinearSchedule
from abp.utils import clear_summary_path
from abp.models.feature_q_model import feature_q_model
from abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom
import numpy as np
# Module-wide logger; 'root' matches the project's shared logger name.
logger = logging.getLogger('root')
# Select CUDA tensor constructors when a GPU is available, CPU ones
# otherwise, so the rest of the module stays device-agnostic.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
# Default tensor type used throughout this module.
Tensor = FloatTensor
class SADQ_GQF(object):
    """Adaptive agent which uses the SADQ algorithm with a GQF model.

    Maintains an evaluation network and a target network (both
    ``feature_q_model`` instances, which predict a feature vector plus a
    scalar Q-value per candidate state), an epsilon-greedy exploration
    schedule, and a replay buffer (``ReplayBuffer_decom``) that stores
    decomposed feature vectors alongside rewards.
    """

    def __init__(self, name, state_length, network_config, reinforce_config,
                 feature_len, combine_decomposed_func, is_sigmoid=False,
                 memory_resotre=True):
        # NOTE(review): the misspelled parameter ``memory_resotre`` is kept
        # verbatim for keyword-caller compatibility. ``combine_decomposed_func``
        # and ``is_sigmoid`` are accepted but never used by this class.
        super(SADQ_GQF, self).__init__()
        self.name = name
        self.network_config = network_config
        self.reinforce_config = reinforce_config
        self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)
        self.learning = True
        self.explanation = False
        self.state_length = state_length
        self.feature_len = feature_len
        # Global bookkeeping counters / history.
        self.steps = 0
        self.reward_history = []
        self.episode_time_history = []
        self.best_reward_mean = -maxsize
        self.episode = 0
        self.features = None
        self.reset()
        self.memory_resotre = memory_resotre
        reinforce_summary_path = (self.reinforce_config.summaries_path +
                                  '/' + self.name)
        if not self.network_config.restore_network:
            clear_summary_path(reinforce_summary_path)
        else:
            self.restore_state()
        self.summary = SummaryWriter(log_dir=reinforce_summary_path)
        self.eval_model = feature_q_model(
            name, state_length, self.feature_len,
            self.network_config.output_shape, network_config)
        self.target_model = feature_q_model(
            name, state_length, self.feature_len,
            self.network_config.output_shape, network_config)
        # Linear annealing schedules for prioritized-replay beta and for
        # the exploration epsilon.
        self.beta_schedule = LinearSchedule(
            self.reinforce_config.beta_timesteps,
            initial_p=self.reinforce_config.beta_initial,
            final_p=self.reinforce_config.beta_final)
        self.epsilon_schedule = LinearSchedule(
            self.reinforce_config.epsilon_timesteps,
            initial_p=self.reinforce_config.starting_epsilon,
            final_p=self.reinforce_config.final_epsilon)

    def should_explore(self):
        """Return True with probability epsilon (annealed by step count)."""
        self.epsilon = self.epsilon_schedule.value(self.steps)
        self.summary.add_scalar(tag='%s/Epsilon' % self.name,
                                scalar_value=self.epsilon,
                                global_step=self.steps)
        return random.random() < self.epsilon

    def predict(self, state, isGreedy=False, is_random=False):
        """Pick one row of ``state`` (one candidate action per row).

        Returns ``(choice, fv)`` where ``fv`` is the eval model's feature
        vector for the chosen row, or ``None`` when the choice was made
        randomly during exploration.  As side effects this records the
        previous transition in the replay buffer and, on schedule,
        replaces the target network and runs a training update.
        """
        if self.learning:
            self.steps += 1
        if (self.previous_state is not None and self.learning and
                self.current_reward is not None):
            # Store the finished transition; duplicate candidate rows are
            # collapsed before being stored as the "next state" set.
            state_crr = np.unique(state, axis=0)
            self.memory.add(self.previous_state, None, self.current_reward,
                            state_crr.reshape(-1, self.state_length), 0,
                            self.features)
        if self.learning and self.should_explore() and not isGreedy:
            q_values = None
            fv = None
            choice = random.choice(list(range(len(state))))
            action = choice
        else:
            with torch.no_grad():
                features_vector, q_values = self.eval_model.predict_batch(
                    Tensor(state))
            q_values = FloatTensor(q_values).view(-1)
            _, choice = q_values.max(0)
            action = choice
            fv = features_vector[choice]
        if (self.learning and
                self.steps % self.reinforce_config.replace_frequency == 0):
            logger.debug('Replacing target model for %s' % self.name)
            # replace_frequency == 1 means continuous soft (Polyak) updates;
            # otherwise do a periodic hard copy.
            if self.reinforce_config.replace_frequency != 1:
                self.target_model.replace(self.eval_model)
            else:
                self.target_model.replace_soft(self.eval_model)
        if (self.learning and
                self.steps > self.reinforce_config.update_start and
                self.steps % self.reinforce_config.update_steps == 0):
            self.update_time -= time.time()
            self.update()
            self.update_time += time.time()
        self.current_reward = 0
        self.previous_state = state[action]
        return choice, fv

    def disable_learning(self, is_save=False):
        """Freeze learning; optionally force-save the current state."""
        logger.info('Disabled Learning for %s agent' % self.name)
        if is_save:
            self.save(force=True)
        self.learning = False
        self.episode = 0

    def enable_learning(self):
        """Re-enable learning and reset per-episode state."""
        logger.info('enabled Learning for %s agent' % self.name)
        self.learning = True
        self.reset()

    def end_episode(self, state):
        """Close out an episode: log stats, store the terminal transition,
        save on schedule, and reset per-episode state."""
        if not self.learning:
            return
        episode_time = time.time() - self.episode_time
        self.reward_history.append(self.total_reward)
        self.episode_time_history.append(episode_time)
        total_time = sum(self.episode_time_history)
        avg_time = total_time / len(self.episode_time_history)
        logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %
                    (self.episode + 1, self.total_reward, self.epsilon))
        logger.debug(
            'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'
            % (episode_time, avg_time, self.prediction_time, self.update_time)
        )
        self.episode += 1
        self.summary.add_scalar(tag='%s/Episode Reward' % self.name,
                                scalar_value=self.total_reward,
                                global_step=self.episode)
        # Terminal flag = 1 marks the end-of-episode transition.
        self.memory.add(self.previous_state, None, self.current_reward,
                        state.reshape(-1, self.state_length), 1,
                        self.features)
        self.save()
        self.reset()

    def reset(self):
        """Reset all per-episode bookkeeping."""
        self.episode_time = time.time()
        self.current_reward = 0
        self.total_reward = 0
        self.previous_state = None
        self.previous_action = None
        self.prediction_time = 0
        self.update_time = 0
        self.features = None

    def restore_state(self):
        """Restore step/episode counters and replay memory from disk."""
        restore_path = self.network_config.network_path + '/adaptive.info'
        if self.network_config.network_path and os.path.exists(restore_path
                ) and self.memory_resotre:
            logger.info('Restoring state from %s' %
                        self.network_config.network_path)
            with open(restore_path, 'rb') as file:
                info = pickle.load(file)
            self.steps = info['steps']
            self.episode = info['episode']
            self.memory.load(self.network_config.network_path)
            print('lenght of memeory: ', len(self.memory))

    def save(self, force=False, appendix=''):
        """Persist both networks, replay memory and counters.

        Saves on a periodic schedule (every ``save_steps`` episodes once
        enough reward history exists) or unconditionally when ``force``
        is true.
        """
        # Snapshot counters up front (best_reward_mean is the pre-update
        # value on the periodic path, matching historical behavior).
        info = {'steps': self.steps,
                'best_reward_mean': self.best_reward_mean,
                'episode': self.episode}
        if (len(self.reward_history) >= self.network_config.save_steps and
                self.episode % self.network_config.save_steps == 0 or force):
            total_reward = sum(
                self.reward_history[-self.network_config.save_steps:])
            current_reward_mean = total_reward / self.network_config.save_steps
            if force:
                print('*************saved*****************',
                      current_reward_mean, self.best_reward_mean)
            else:
                # Periodic save: track the latest rolling reward mean.
                self.best_reward_mean = current_reward_mean
                logger.info('Saving network. Found new best reward (%.2f)' %
                            total_reward)
            # BUG FIX: previously the actual save only ran on the periodic
            # (not force) branch, so save(force=True) — used by
            # disable_learning(is_save=True) — logged success but wrote
            # nothing to disk.  The save now runs in both cases.
            self.eval_model.save_network(appendix=appendix)
            self.target_model.save_network(appendix=appendix)
            with open(self.network_config.network_path + '/adaptive.info',
                      'wb') as file:
                pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)
            self.memory.save(self.network_config.network_path)
            print('lenght of memeory: ', len(self.memory))

    def reward(self, r):
        """Accumulate reward ``r`` into the episode and transition totals."""
        self.total_reward += r
        self.current_reward += r

    def passFeatures(self, features):
        """Store a copy of the externally-computed feature vector for the
        next stored transition."""
        self.features = features.copy()
        return

    def summary_test(self, reward, epoch):
        """Log an evaluation reward to TensorBoard (x-axis: epoch * 40)."""
        self.summary.add_scalar(tag='%s/eval reward' % self.name,
                                scalar_value=reward, global_step=epoch * 40)

    def summary_GVFs_loss(self, loss, epoch):
        """Log the GVFs loss to TensorBoard (x-axis: epoch * 40)."""
        self.summary.add_scalar(tag='%s/GVFs loss' % self.name,
                                scalar_value=loss, global_step=epoch * 40)

    def update(self):
        """One training step on a sampled minibatch.

        Builds TD targets for both the scalar Q-value and the decomposed
        feature vector, then fits the evaluation model.
        """
        if len(self.memory._storage) <= self.reinforce_config.batch_size:
            return
        beta = self.beta_schedule.value(self.steps)
        self.summary.add_scalar(tag='%s/Beta' % self.name,
                                scalar_value=beta, global_step=self.steps)
        if self.reinforce_config.use_prior_memory:
            batch = self.memory.sample(self.reinforce_config.batch_size, beta)
            (states, actions, reward, next_states, is_terminal, weights,
             batch_idxes) = batch
            self.summary.add_histogram(tag='%s/Batch Indices' % self.name,
                                       values=Tensor(batch_idxes),
                                       global_step=self.steps)
        else:
            batch = self.memory.sample(self.reinforce_config.batch_size)
            (states, actions, reward, next_states, is_terminal,
             features_vector) = batch
        states = FloatTensor(states)
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        features_vector = FloatTensor(features_vector)
        feature_values, q_values = self.eval_model.predict_batch(states)
        q_values = q_values.flatten()
        q_max = []
        f_max = []
        for i, ns in enumerate(next_states):
            # Each stored "next state" is a set of candidate rows; score
            # them all with the target network and keep the argmax-Q row.
            feature_n, q_n = self.target_model.predict_batch(
                FloatTensor(ns).view(-1, self.state_length))
            q_value_max, idx = q_n.max(0)
            features_max = feature_n[idx]
            q_max.append(q_value_max)
            if self.network_config.version in ['v10', 'v11']:
                # Rescale feature groups by next-state/current-state ratios;
                # columns 63-66 presumably hold unit/resource counts for the
                # four groups — TODO confirm against the state encoding.
                features_max[:, :3] = (features_max[:, :3] *
                                       ns[idx, 65] / states[i, 65])
                features_max[:, 3:6] = (features_max[:, 3:6] *
                                        ns[idx, 66] / states[i, 66])
                features_max[:, 6:9] = (features_max[:, 6:9] *
                                        ns[idx, 63] / states[i, 63])
                features_max[:, 9:12] = (features_max[:, 9:12] *
                                         ns[idx, 64] / states[i, 64])
                # Guard against division by a zero state count.
                features_max[features_max == float('inf')] = 0
            f_max.append(features_max.view(-1))
        q_max = torch.stack(q_max, dim=1).view(-1)
        f_max = torch.stack(f_max)
        # Zero out bootstrapped values for terminal transitions.
        q_max = (1 - terminal) * q_max
        f_max = (1 - terminal.view(-1, 1)) * f_max
        q_target = reward + self.reinforce_config.discount_factor * q_max
        f_target = (features_vector +
                    self.reinforce_config.discount_factor * f_max)
        # NaN guard: ``x != x`` is true only for NaN entries.
        if (torch.sum(feature_values != feature_values).item() +
                torch.sum(f_target != f_target)).item() > 0:
            f_target[f_target != f_target] = 0
        self.eval_model.fit(q_values, q_target, feature_values, f_target)
        if self.reinforce_config.use_prior_memory:
            td_errors = q_values - q_target
            new_priorities = torch.abs(td_errors) + 1e-06
            self.memory.update_priorities(batch_idxes, new_priorities.data)

    # BUG FIX: load_model/load_weight were each defined twice; the first
    # definitions were dead code shadowed by these surviving ones.
    def load_model(self, model):
        """Copy another model's weights into the evaluation model."""
        self.eval_model.replace(model)

    def load_weight(self, new_feature_weights, new_q_weights):
        """Load state dicts into the feature and Q sub-models.

        (``feautre_model`` (sic) is the attribute name defined by
        feature_q_model.)
        """
        self.eval_model.feautre_model.load_state_dict(new_feature_weights)
        self.eval_model.q_model.load_state_dict(new_q_weights)
<|reserved_special_token_1|>
import logging
import time
import random
import pickle
import os
from sys import maxsize
import torch
from tensorboardX import SummaryWriter
from baselines.common.schedules import LinearSchedule
from abp.utils import clear_summary_path
from abp.models.feature_q_model import feature_q_model
from abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom
import numpy as np
logger = logging.getLogger('root')
# Pick CUDA-backed tensor constructors when a GPU is available, CPU otherwise,
# so the rest of the module can build device-appropriate tensors uniformly.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
# Default tensor alias used for generic float inputs.
Tensor = FloatTensor
class SADQ_GQF(object):
    """Adaptive agent implementing the SADQ algorithm with a GQF feature head.

    Maintains an evaluation/target pair of ``feature_q_model`` networks that
    jointly predict a Q value and a feature (GVF) vector per candidate state.
    ``predict`` selects one row of a variable-length candidate-state matrix
    (epsilon-greedy while learning), transitions go into a replay buffer, and
    ``update`` fits the evaluation model against bootstrapped Q/feature
    targets computed from the target model.
    """

    def __init__(self, name, state_length, network_config, reinforce_config, feature_len, combine_decomposed_func, is_sigmoid = False, memory_resotre = True):
        """Build the models, replay memory, summaries and schedules.

        Args:
            name: identifier used in summaries, logs and checkpoints.
            state_length: length of one flattened state vector.
            network_config: network hyper-parameters (project type).
            reinforce_config: RL hyper-parameters (project type).
            feature_len: length of the predicted feature (GVF) vector.
            combine_decomposed_func: accepted for interface compatibility
                with callers; not used by this class.
            is_sigmoid: accepted for interface compatibility; not used.
            memory_resotre: (sic — keeps caller-visible spelling) when True,
                ``restore_state`` also reloads replay memory and counters.
        """
        super(SADQ_GQF, self).__init__()
        self.name = name
        self.network_config = network_config
        self.reinforce_config = reinforce_config
        self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)
        self.learning = True
        self.explanation = False
        self.state_length = state_length
        self.features = 0
        self.feature_len = feature_len
        # Global step/episode bookkeeping.
        self.steps = 0
        self.reward_history = []
        self.episode_time_history = []
        self.best_reward_mean = -maxsize
        self.episode = 0
        self.feature_len = feature_len
        self.features = None
        self.reset()
        self.memory_resotre = memory_resotre
        reinforce_summary_path = self.reinforce_config.summaries_path + "/" + self.name
        if not self.network_config.restore_network:
            clear_summary_path(reinforce_summary_path)
        else:
            self.restore_state()
        self.summary = SummaryWriter(log_dir=reinforce_summary_path)
        self.eval_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)
        self.target_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)
        self.beta_schedule = LinearSchedule(self.reinforce_config.beta_timesteps,
                                            initial_p=self.reinforce_config.beta_initial,
                                            final_p=self.reinforce_config.beta_final)
        self.epsilon_schedule = LinearSchedule(self.reinforce_config.epsilon_timesteps,
                                               initial_p=self.reinforce_config.starting_epsilon,
                                               final_p=self.reinforce_config.final_epsilon)

    def should_explore(self):
        """Return True with probability epsilon (annealed by step count)."""
        self.epsilon = self.epsilon_schedule.value(self.steps)
        self.summary.add_scalar(tag='%s/Epsilon' % self.name,
                                scalar_value=self.epsilon,
                                global_step=self.steps)
        return random.random() < self.epsilon

    def predict(self, state, isGreedy = False, is_random = False):
        """Pick one candidate row of ``state`` and return (choice, feature_vector).

        ``state`` is a matrix of candidate next states (one per row).  While
        learning, the previous transition is appended to replay memory and the
        target model / training updates are triggered on schedule.

        Returns:
            (choice, fv): the chosen row index and its predicted feature
            vector; ``fv`` is None when the random-exploration branch fired.
        """
        if self.learning:
            self.steps += 1
        # Store the pending transition (previous state -> current candidates).
        if self.previous_state is not None and self.learning and self.current_reward is not None:
            state_crr = np.unique(state, axis=0)
            self.memory.add(self.previous_state,
                            None,
                            self.current_reward,
                            state_crr.reshape(-1, self.state_length), 0,
                            self.features)
        if self.learning and self.should_explore() and not isGreedy:
            q_values = None
            fv = None
            choice = random.choice(list(range(len(state))))
            action = choice
        else:
            with torch.no_grad():
                features_vector, q_values = self.eval_model.predict_batch(Tensor(state))
                q_values = FloatTensor(q_values).view(-1)
                _, choice = q_values.max(0)
                action = choice
                fv = features_vector[choice]
        if self.learning and self.steps % self.reinforce_config.replace_frequency == 0:
            logger.debug("Replacing target model for %s" % self.name)
            # replace_frequency == 1 means per-step soft (Polyak-style) updates;
            # otherwise a hard copy every `replace_frequency` steps.
            if self.reinforce_config.replace_frequency != 1:
                self.target_model.replace(self.eval_model)
            else:
                self.target_model.replace_soft(self.eval_model)
        if (self.learning and
                self.steps > self.reinforce_config.update_start and
                self.steps % self.reinforce_config.update_steps == 0):
            self.update_time -= time.time()
            self.update()
            self.update_time += time.time()
        self.current_reward = 0
        self.previous_state = state[action]
        return choice, fv

    def disable_learning(self, is_save = False):
        """Freeze learning (optionally force-saving networks and memory first)."""
        logger.info("Disabled Learning for %s agent" % self.name)
        if is_save:
            self.save(force = True)
        self.learning = False
        self.episode = 0

    def enable_learning(self):
        """Re-enable learning and reset per-episode state."""
        logger.info("enabled Learning for %s agent" % self.name)
        self.learning = True
        self.reset()

    def end_episode(self, state):
        """Close out an episode: log stats, store the terminal transition, save."""
        if not self.learning:
            return
        episode_time = time.time() - self.episode_time
        self.reward_history.append(self.total_reward)
        self.episode_time_history.append(episode_time)
        total_time = sum(self.episode_time_history)
        avg_time = total_time / len(self.episode_time_history)
        logger.info("End of Episode %d, "
                    "Total reward %.2f, "
                    "Epsilon %.2f" % (self.episode + 1,
                                      self.total_reward,
                                      self.epsilon))
        logger.debug("Episode Time: %.2fs (%.2fs), "
                     "Prediction Time: %.2f, "
                     "Update Time %.2f" % (episode_time,
                                           avg_time,
                                           self.prediction_time,
                                           self.update_time))
        self.episode += 1
        self.summary.add_scalar(tag='%s/Episode Reward' % self.name,
                                scalar_value=self.total_reward,
                                global_step=self.episode)
        # Terminal transition (is_terminal flag = 1).
        self.memory.add(self.previous_state,
                        None,
                        self.current_reward,
                        state.reshape(-1, self.state_length), 1,
                        self.features)
        self.save()
        self.reset()

    def reset(self):
        """Reset per-episode accumulators and timers."""
        self.episode_time = time.time()
        self.current_reward = 0
        self.total_reward = 0
        self.previous_state = None
        self.previous_action = None
        self.prediction_time = 0
        self.update_time = 0
        self.features = None

    def restore_state(self):
        """Reload step/episode counters and replay memory from disk, if present."""
        restore_path = self.network_config.network_path + "/adaptive.info"
        if self.network_config.network_path and os.path.exists(restore_path) and self.memory_resotre:
            logger.info("Restoring state from %s" % self.network_config.network_path)
            with open(restore_path, "rb") as file:
                info = pickle.load(file)
            self.steps = info["steps"]
            self.episode = info["episode"]
            self.memory.load(self.network_config.network_path)
            print("lenght of memeory: ", len(self.memory))

    def save(self, force=False, appendix=""):
        """Persist networks, counters and replay memory.

        Only saves every ``network_config.save_steps`` episodes, or always
        when ``force`` is True.

        NOTE(review): the inner ``if not force:`` can never be True inside
        the ``if force:`` branch, so ``best_reward_mean`` is never updated —
        the original reward-based condition is commented out.  Left as-is to
        preserve behavior.
        """
        info = {
            "steps": self.steps,
            "best_reward_mean": self.best_reward_mean,
            "episode": self.episode
        }
        if (len(self.reward_history) >= self.network_config.save_steps and
                self.episode % self.network_config.save_steps == 0) or force:
            total_reward = sum(self.reward_history[-self.network_config.save_steps:])
            current_reward_mean = total_reward / self.network_config.save_steps
            if force:  # or current_reward_mean >= self.best_reward_mean:
                print("*************saved*****************", current_reward_mean, self.best_reward_mean)
                if not force:
                    self.best_reward_mean = current_reward_mean
                logger.info("Saving network. Found new best reward (%.2f)" % total_reward)
                self.eval_model.save_network(appendix = appendix)
                self.target_model.save_network(appendix = appendix)
                with open(self.network_config.network_path + "/adaptive.info", "wb") as file:
                    pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)
                self.memory.save(self.network_config.network_path)
                print("lenght of memeory: ", len(self.memory))
            else:
                logger.info("The best reward is still %.2f. Not saving" % self.best_reward_mean)

    def reward(self, r):
        """Accumulate reward for the current step and episode."""
        self.total_reward += r
        self.current_reward += r

    def passFeatures(self, features):
        """Store a copy of the externally computed feature vector for the next transition."""
        self.features = features.copy()
        return

    def summary_test(self, reward, epoch):
        """Log an evaluation reward to tensorboard (x-axis scaled by 40 steps/epoch)."""
        self.summary.add_scalar(tag='%s/eval reward' % self.name,
                                scalar_value=reward, global_step=epoch * 40)

    def summary_GVFs_loss(self, loss, epoch):
        """Log the GVF (feature-head) loss to tensorboard."""
        self.summary.add_scalar(tag='%s/GVFs loss' % self.name,
                                scalar_value=loss, global_step=epoch * 40)

    def update(self):
        """One training step: sample a batch and fit Q and feature heads.

        NOTE(review): the prioritized-memory branch never binds
        ``features_vector``; with ``use_prior_memory`` enabled this method
        raises NameError further down.  Left unchanged pending a decision on
        what the prioritized sampler should return.
        """
        if len(self.memory._storage) <= self.reinforce_config.batch_size:
            return
        beta = self.beta_schedule.value(self.steps)
        self.summary.add_scalar(tag='%s/Beta' % self.name,
                                scalar_value=beta, global_step=self.steps)
        if self.reinforce_config.use_prior_memory:
            batch = self.memory.sample(self.reinforce_config.batch_size, beta)
            (states, actions, reward, next_states,
             is_terminal, weights, batch_idxes) = batch
            self.summary.add_histogram(tag='%s/Batch Indices' % self.name,
                                       values=Tensor(batch_idxes),
                                       global_step=self.steps)
        else:
            batch = self.memory.sample(self.reinforce_config.batch_size)
            (states, actions, reward, next_states, is_terminal, features_vector) = batch
        states = FloatTensor(states)
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        features_vector = FloatTensor(features_vector)
        # Current predictions for the stored states.
        feature_values, q_values = self.eval_model.predict_batch(states)
        q_values = q_values.flatten()
        q_max = []
        f_max = []
        for i, ns in enumerate(next_states):
            # Each next state is a variable-length set of candidates;
            # bootstrap from the argmax-Q candidate under the target model.
            feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns).view(-1, self.state_length))
            q_value_max, idx = q_n.max(0)
            features_max = feature_n[idx]
            q_max.append(q_value_max)
            if self.network_config.version in ["v10", "v11"]:
                # Rescale feature groups by the ratio of state components
                # 63..66 between the chosen next state and the current state
                # (presumably unit counts — TODO confirm against the env).
                features_max[:, :3] = (features_max[:, :3] * ns[idx, 65]) / states[i, 65]
                features_max[:, 3:6] = (features_max[:, 3:6] * ns[idx, 66]) / states[i, 66]
                features_max[:, 6:9] = (features_max[:, 6:9] * ns[idx, 63]) / states[i, 63]
                features_max[:, 9:12] = (features_max[:, 9:12] * ns[idx, 64]) / states[i, 64]
                # Zero out +inf from division by a zero state component
                # (NaNs from 0/0 are handled on f_target below).
                features_max[features_max == float('inf')] = 0
            f_max.append(features_max.view(-1))
        q_max = torch.stack(q_max, dim = 1).view(-1)
        f_max = torch.stack(f_max)
        # Terminal transitions bootstrap from zero.
        q_max = (1 - terminal) * q_max
        f_max = (1 - terminal.view(-1, 1)) * f_max
        q_target = reward + self.reinforce_config.discount_factor * q_max
        f_target = features_vector + self.reinforce_config.discount_factor * f_max
        # Guard against NaNs in either predictions or feature targets
        # (x != x is the NaN test); zero the target NaNs before fitting.
        if (torch.sum(feature_values != feature_values).item() + torch.sum(f_target != f_target)).item() > 0:
            f_target[f_target != f_target] = 0
        self.eval_model.fit(q_values, q_target, feature_values, f_target)
        # Update priorities from TD error when prioritized replay is on.
        if self.reinforce_config.use_prior_memory:
            td_errors = q_values - q_target
            new_priorities = torch.abs(td_errors) + 1e-6  # prioritized_replay_eps
            self.memory.update_priorities(batch_idxes, new_priorities.data)

    # NOTE(review): the original class defined load_model twice (identical
    # bodies) and load_weight twice with different signatures; only the last
    # definitions survive class creation, so the shadowed pair was removed.
    def load_model(self, model):
        """Replace this agent's evaluation model with *model*."""
        self.eval_model.replace(model)

    def load_weight(self, new_feature_weights, new_q_weights):
        """Load state dicts into the feature and Q sub-models separately.

        ``feautre_model`` (sic) matches the attribute name declared by the
        project's ``feature_q_model``.
        """
        self.eval_model.feautre_model.load_state_dict(new_feature_weights)
        self.eval_model.q_model.load_state_dict(new_q_weights)
|
flexible
|
{
"blob_id": "424a0e8a7a80e24aec4bdb9b8c84fd9a5e6090c6",
"index": 6782,
"step-1": "<mask token>\n\n\nclass SADQ_GQF(object):\n <mask token>\n <mask token>\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n <mask token>\n <mask token>\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / 
len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n (self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n <mask token>\n <mask token>\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n <mask token>\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n 
(states, actions, reward, next_states, is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = 
q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n <mask token>\n <mask token>\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-2": "<mask token>\n\n\nclass SADQ_GQF(object):\n <mask token>\n\n def __init__(self, name, state_length, network_config, reinforce_config,\n feature_len, combine_decomposed_func, is_sigmoid=False,\n memory_resotre=True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n self.features = 0\n self.feature_len = feature_len\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = (self.reinforce_config.summaries_path +\n '/' + self.name)\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.target_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.beta_schedule = LinearSchedule(self.reinforce_config.\n beta_timesteps, initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.\n epsilon_timesteps, initial_p=self.reinforce_config.\n starting_epsilon, final_p=self.reinforce_config.final_epsilon)\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if 
self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n\n def disable_learning(self, is_save=False):\n logger.info('Disabled Learning for %s agent' % self.name)\n if is_save:\n self.save(force=True)\n self.learning = False\n self.episode = 0\n\n def enable_learning(self):\n logger.info('enabled Learning for %s agent' % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n 
(self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n <mask token>\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n <mask token>\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states, 
is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = 
torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n <mask token>\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-3": "<mask token>\n\n\nclass SADQ_GQF(object):\n <mask token>\n\n def __init__(self, name, state_length, network_config, reinforce_config,\n feature_len, combine_decomposed_func, is_sigmoid=False,\n memory_resotre=True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n self.features = 0\n self.feature_len = feature_len\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = (self.reinforce_config.summaries_path +\n '/' + self.name)\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.target_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.beta_schedule = LinearSchedule(self.reinforce_config.\n beta_timesteps, initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.\n epsilon_timesteps, initial_p=self.reinforce_config.\n starting_epsilon, final_p=self.reinforce_config.final_epsilon)\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if 
self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n\n def disable_learning(self, is_save=False):\n logger.info('Disabled Learning for %s agent' % self.name)\n if is_save:\n self.save(force=True)\n self.learning = False\n self.episode = 0\n\n def enable_learning(self):\n logger.info('enabled Learning for %s agent' % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n 
(self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n\n def save(self, force=False, appendix=''):\n info = {'steps': self.steps, 'best_reward_mean': self.\n best_reward_mean, 'episode': self.episode}\n if (len(self.reward_history) >= self.network_config.save_steps and \n self.episode % self.network_config.save_steps == 0 or force):\n total_reward = sum(self.reward_history[-self.network_config.\n save_steps:])\n current_reward_mean = total_reward / self.network_config.save_steps\n if force:\n print('*************saved*****************',\n current_reward_mean, self.best_reward_mean)\n if not force:\n self.best_reward_mean = current_reward_mean\n logger.info('Saving network. 
Found new best reward (%.2f)' %\n total_reward)\n self.eval_model.save_network(appendix=appendix)\n self.target_model.save_network(appendix=appendix)\n with open(self.network_config.network_path +\n '/adaptive.info', 'wb') as file:\n pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)\n self.memory.save(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n else:\n logger.info('The best reward is still %.2f. Not saving' %\n self.best_reward_mean)\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n\n def summary_GVFs_loss(self, loss, epoch):\n self.summary.add_scalar(tag='%s/GVFs loss' % self.name,\n scalar_value=loss, global_step=epoch * 40)\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states, is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = 
q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, weight_dict):\n self.eval_model.load_weight(weight_dict)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-4": "import logging\nimport time\nimport random\nimport pickle\nimport os\nfrom sys import maxsize\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom baselines.common.schedules import LinearSchedule\nfrom abp.utils import clear_summary_path\nfrom abp.models.feature_q_model import feature_q_model\nfrom abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom\nimport numpy as np\nlogger = logging.getLogger('root')\nuse_cuda = torch.cuda.is_available()\nFloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor\nIntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor\nByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor\nTensor = FloatTensor\n\n\nclass SADQ_GQF(object):\n \"\"\"Adaptive which uses the SADQ algorithm\"\"\"\n\n def __init__(self, name, state_length, network_config, reinforce_config,\n feature_len, combine_decomposed_func, is_sigmoid=False,\n memory_resotre=True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n self.features = 0\n self.feature_len = feature_len\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = (self.reinforce_config.summaries_path +\n '/' + self.name)\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, 
network_config)\n self.target_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.beta_schedule = LinearSchedule(self.reinforce_config.\n beta_timesteps, initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.\n epsilon_timesteps, initial_p=self.reinforce_config.\n starting_epsilon, final_p=self.reinforce_config.final_epsilon)\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time 
+= time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n\n def disable_learning(self, is_save=False):\n logger.info('Disabled Learning for %s agent' % self.name)\n if is_save:\n self.save(force=True)\n self.learning = False\n self.episode = 0\n\n def enable_learning(self):\n logger.info('enabled Learning for %s agent' % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n (self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n 
self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n\n def save(self, force=False, appendix=''):\n info = {'steps': self.steps, 'best_reward_mean': self.\n best_reward_mean, 'episode': self.episode}\n if (len(self.reward_history) >= self.network_config.save_steps and \n self.episode % self.network_config.save_steps == 0 or force):\n total_reward = sum(self.reward_history[-self.network_config.\n save_steps:])\n current_reward_mean = total_reward / self.network_config.save_steps\n if force:\n print('*************saved*****************',\n current_reward_mean, self.best_reward_mean)\n if not force:\n self.best_reward_mean = current_reward_mean\n logger.info('Saving network. Found new best reward (%.2f)' %\n total_reward)\n self.eval_model.save_network(appendix=appendix)\n self.target_model.save_network(appendix=appendix)\n with open(self.network_config.network_path +\n '/adaptive.info', 'wb') as file:\n pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)\n self.memory.save(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n else:\n logger.info('The best reward is still %.2f. 
Not saving' %\n self.best_reward_mean)\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n\n def summary_GVFs_loss(self, loss, epoch):\n self.summary.add_scalar(tag='%s/GVFs loss' % self.name,\n scalar_value=loss, global_step=epoch * 40)\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states, is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 
3:6] * ns[idx, 66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, weight_dict):\n self.eval_model.load_weight(weight_dict)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-5": "import logging\nimport time\nimport random\nimport pickle\nimport os\nfrom sys import maxsize\n\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom baselines.common.schedules import LinearSchedule\n\nfrom abp.utils import clear_summary_path\nfrom abp.models.feature_q_model import feature_q_model\nfrom abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom\nimport numpy as np\n\nlogger = logging.getLogger('root')\nuse_cuda = torch.cuda.is_available()\nFloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor\nIntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor\nByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor\nTensor = FloatTensor\n\n\nclass SADQ_GQF(object):\n \"\"\"Adaptive which uses the SADQ algorithm\"\"\"\n\n def __init__(self, name, state_length, network_config, reinforce_config, feature_len, combine_decomposed_func, is_sigmoid = False, memory_resotre = True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n #self.choices = choices\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n\n self.features = 0\n self.feature_len = feature_len\n # Global\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = self.reinforce_config.summaries_path + \"/\" + self.name\n\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n \n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, 
self.feature_len, self.network_config.output_shape, network_config)\n self.target_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)\n# self.target_model.eval_mode()\n\n self.beta_schedule = LinearSchedule(self.reinforce_config.beta_timesteps,\n initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.epsilon_timesteps,\n initial_p=self.reinforce_config.starting_epsilon,\n final_p=self.reinforce_config.final_epsilon)\n\n# def __del__(self):\n# self.save()\n# self.summary.close()\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name,\n scalar_value=self.epsilon,\n global_step=self.steps)\n\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy = False, is_random = False):\n \n if self.learning:\n self.steps += 1\n # add to experience\n if self.previous_state is not None and self.learning and self.current_reward is not None:\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state,\n None,\n self.current_reward,\n state_crr.reshape(-1, self.state_length), 0,\n self.features)\n# print(\"not final : {}\".format(self.current_reward) )\n# print(0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n# print(\"q_value : {}\".format(q_values))\n# input()\n if self.learning and self.steps % self.reinforce_config.replace_frequency == 0:\n logger.debug(\"Replacing target model for %s\" % self.name)\n if self.reinforce_config.replace_frequency 
!= 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n# self.target_model.eval_mode()\n\n if (self.learning and\n self.steps > self.reinforce_config.update_start and\n self.steps % self.reinforce_config.update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n\n self.current_reward = 0\n self.previous_state = state[action]\n #self.previous_action = action\n\n return choice, fv#,q_values\n\n def disable_learning(self, is_save = False):\n logger.info(\"Disabled Learning for %s agent\" % self.name)\n if is_save:\n# self.save()\n self.save(force = True)\n self.learning = False\n self.episode = 0\n \n def enable_learning(self):\n logger.info(\"enabled Learning for %s agent\" % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n# print(\"end:\")\n# print(self.current_reward)\n# input()\n episode_time = time.time() - self.episode_time\n\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n\n logger.info(\"End of Episode %d, \"\n \"Total reward %.2f, \"\n \"Epsilon %.2f\" % (self.episode + 1,\n self.total_reward,\n self.epsilon))\n\n logger.debug(\"Episode Time: %.2fs (%.2fs), \"\n \"Prediction Time: %.2f, \"\n \"Update Time %.2f\" % (episode_time,\n avg_time,\n self.prediction_time,\n self.update_time))\n\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward,\n global_step=self.episode)\n\n self.memory.add(self.previous_state,\n None,\n self.current_reward,\n state.reshape(-1, self.state_length), 1,\n self.features)\n# print(\"final : {}\".format(self.current_reward) )\n# input()\n# print(1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n 
self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + \"/adaptive.info\"\n if self.network_config.network_path and os.path.exists(restore_path) and self.memory_resotre:\n logger.info(\"Restoring state from %s\" % self.network_config.network_path)\n\n with open(restore_path, \"rb\") as file:\n info = pickle.load(file)\n\n self.steps = info[\"steps\"]\n# self.best_reward_mean = info[\"best_reward_mean\"]\n self.episode = info[\"episode\"]\n self.memory.load(self.network_config.network_path)\n print(\"lenght of memeory: \", len(self.memory))\n\n def save(self, force=False, appendix=\"\"):\n info = {\n \"steps\": self.steps,\n \"best_reward_mean\": self.best_reward_mean,\n \"episode\": self.episode\n }\n \n if (len(self.reward_history) >= self.network_config.save_steps and\n self.episode % self.network_config.save_steps == 0) or force:\n\n total_reward = sum(self.reward_history[-self.network_config.save_steps:])\n current_reward_mean = total_reward / self.network_config.save_steps\n\n if force: #or current_reward_mean >= self.best_reward_mean:\n print(\"*************saved*****************\", current_reward_mean, self.best_reward_mean)\n if not force:\n self.best_reward_mean = current_reward_mean\n logger.info(\"Saving network. Found new best reward (%.2f)\" % total_reward)\n self.eval_model.save_network(appendix = appendix)\n self.target_model.save_network(appendix = appendix)\n# self.eval_model.save_network()\n# self.target_model.save_network()\n with open(self.network_config.network_path + \"/adaptive.info\", \"wb\") as file:\n pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)\n self.memory.save(self.network_config.network_path)\n print(\"lenght of memeory: \", len(self.memory))\n else:\n logger.info(\"The best reward is still %.2f. 
Not saving\" % self.best_reward_mean)\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n def summary_GVFs_loss(self, loss, epoch):\n self.summary.add_scalar(tag='%s/GVFs loss' % self.name,\n scalar_value=loss, global_step=epoch * 40)\n \n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n# self.eval_model.train_mode()\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name,\n scalar_value=beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states,\n is_terminal, weights, batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes),\n global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector) = batch\n\n states = FloatTensor(states)\n# print(states.size())\n# next_states = FloatTensor(next_states)\n terminal = FloatTensor([1 if t else 0 for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size,\n dtype=torch.long)\n # Current Q Values\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n \n q_max.append(q_value_max)\n if self.network_config.version in [\"v10\", \"v11\"]:\n# 
print(features_max)\n# print(ns[idx, 63:67])\n# print(states[i, 63:67])\n# print(features_max.size(), FloatTensor(ns).view(-1, self.state_length).size(), states.size())\n features_max[:, :3] = (features_max[:, :3] * ns[idx, 65]) / states[i, 65]\n features_max[:, 3:6] = (features_max[:, 3:6] * ns[idx, 66]) / states[i, 66]\n features_max[:, 6:9] = (features_max[:, 6:9] * ns[idx, 63]) / states[i, 63]\n features_max[:, 9:12] = (features_max[:, 9:12] * ns[idx, 64]) / states[i, 64]\n features_max[features_max == float('inf')] = 0\n# print(features_max)\n# input()\n f_max.append(features_max.view(-1))\n \n# if torch.sum(terminal == torch.sum(features_vector, dim = 1)) != len(terminal):\n# print(terminal)\n# print(features_vector)\n# input()\n q_max = torch.stack(q_max, dim = 1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n \n f_max = (1 - terminal.view(-1, 1)) * f_max\n \n q_target = reward + self.reinforce_config.discount_factor * q_max\n \n f_target = features_vector + self.reinforce_config.discount_factor * f_max\n \n# if torch.sum(reward).item() > 0:\n# print(reward)\n# print(feature_values)\n# print(q_target)\n# print(q_values)\n# input()\n # update model\n if (torch.sum(feature_values != feature_values).item() + torch.sum(f_target != f_target)).item() > 0:\n\n# print(\"1\")\n# print(features_vector)\n# print(\"2\")\n# print(feature_values)\n# print(\"3\")\n# print(f_target)\n# print(\"4\")\n# print(f_max)\n# print(\"5\")\n# print(states.tolist())\n# input()\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n\n # Update priorities\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-6 # prioritized_replay_eps\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n \n def load_model(self, model):\n self.eval_model.replace(model)\n \n def load_weight(self, weight_dict):\n 
self.eval_model.load_weight(weight_dict)\n \n def load_model(self, model):\n self.eval_model.replace(model)\n \n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)",
"step-ids": [
11,
16,
19,
22,
23
]
}
|
[
11,
16,
19,
22,
23
] |
from django import forms
from .models import HhRequest
class WorkRequestForm(forms.ModelForm):
"""Форма заявки на премию"""
class Meta:
model = HhRequest
fields = ('profile', 'sphere', 'experience', 'work_request', 'resume')
widgets = {
'profile': forms.Select(
attrs={
'id': 'profile',
'required': '',
'class': 'browser-default custom-select'
}
),
'sphere': forms.Select(
attrs={
'id': 'sphere',
'required': '',
'class': 'browser-default custom-select'
}
),
'experience': forms.Select(
attrs={
'id': 'experience',
'required': '',
'class': 'browser-default custom-select'
}
),
'work_request': forms.Select(
attrs={
'id': 'work_request',
'required': '',
'class': 'browser-default custom-select'
}
),
'resume': forms.FileInput(
attrs={
'id': 'hh_resume',
'required': '',
'class': 'custom-file-input',
'lang': 'ru'
}
),
}
|
normal
|
{
"blob_id": "3887516e4222504defe439e62bd24b12db3cdd84",
"index": 695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass WorkRequestForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = HhRequest\n fields = 'profile', 'sphere', 'experience', 'work_request', 'resume'\n widgets = {'profile': forms.Select(attrs={'id': 'profile',\n 'required': '', 'class': 'browser-default custom-select'}),\n 'sphere': forms.Select(attrs={'id': 'sphere', 'required': '',\n 'class': 'browser-default custom-select'}), 'experience': forms\n .Select(attrs={'id': 'experience', 'required': '', 'class':\n 'browser-default custom-select'}), 'work_request': forms.Select\n (attrs={'id': 'work_request', 'required': '', 'class':\n 'browser-default custom-select'}), 'resume': forms.FileInput(\n attrs={'id': 'hh_resume', 'required': '', 'class':\n 'custom-file-input', 'lang': 'ru'})}\n",
"step-3": "<mask token>\n\n\nclass WorkRequestForm(forms.ModelForm):\n \"\"\"Форма заявки на премию\"\"\"\n\n\n class Meta:\n model = HhRequest\n fields = 'profile', 'sphere', 'experience', 'work_request', 'resume'\n widgets = {'profile': forms.Select(attrs={'id': 'profile',\n 'required': '', 'class': 'browser-default custom-select'}),\n 'sphere': forms.Select(attrs={'id': 'sphere', 'required': '',\n 'class': 'browser-default custom-select'}), 'experience': forms\n .Select(attrs={'id': 'experience', 'required': '', 'class':\n 'browser-default custom-select'}), 'work_request': forms.Select\n (attrs={'id': 'work_request', 'required': '', 'class':\n 'browser-default custom-select'}), 'resume': forms.FileInput(\n attrs={'id': 'hh_resume', 'required': '', 'class':\n 'custom-file-input', 'lang': 'ru'})}\n",
"step-4": "from django import forms\nfrom .models import HhRequest\n\n\nclass WorkRequestForm(forms.ModelForm):\n \"\"\"Форма заявки на премию\"\"\"\n\n\n class Meta:\n model = HhRequest\n fields = 'profile', 'sphere', 'experience', 'work_request', 'resume'\n widgets = {'profile': forms.Select(attrs={'id': 'profile',\n 'required': '', 'class': 'browser-default custom-select'}),\n 'sphere': forms.Select(attrs={'id': 'sphere', 'required': '',\n 'class': 'browser-default custom-select'}), 'experience': forms\n .Select(attrs={'id': 'experience', 'required': '', 'class':\n 'browser-default custom-select'}), 'work_request': forms.Select\n (attrs={'id': 'work_request', 'required': '', 'class':\n 'browser-default custom-select'}), 'resume': forms.FileInput(\n attrs={'id': 'hh_resume', 'required': '', 'class':\n 'custom-file-input', 'lang': 'ru'})}\n",
"step-5": "from django import forms\n\nfrom .models import HhRequest\n\n\nclass WorkRequestForm(forms.ModelForm):\n \"\"\"Форма заявки на премию\"\"\"\n class Meta:\n model = HhRequest\n fields = ('profile', 'sphere', 'experience', 'work_request', 'resume')\n\n widgets = {\n\n 'profile': forms.Select(\n attrs={\n 'id': 'profile',\n 'required': '',\n 'class': 'browser-default custom-select'\n }\n ),\n 'sphere': forms.Select(\n attrs={\n 'id': 'sphere',\n 'required': '',\n 'class': 'browser-default custom-select'\n }\n ),\n 'experience': forms.Select(\n attrs={\n 'id': 'experience',\n 'required': '',\n 'class': 'browser-default custom-select'\n }\n ),\n 'work_request': forms.Select(\n attrs={\n 'id': 'work_request',\n 'required': '',\n 'class': 'browser-default custom-select'\n }\n ),\n 'resume': forms.FileInput(\n attrs={\n 'id': 'hh_resume',\n 'required': '',\n 'class': 'custom-file-input',\n 'lang': 'ru'\n }\n ),\n\n }\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#-*-coding:utf-8 -*-
import subprocess
def get_audio(text):
stat = subprocess.call(['./tts', text])
if stat == 0:
return "Success"
else:
print "Failed"
if __name__ == '__main__':
text = "我是聊天机器人"
get_audio(text)
|
normal
|
{
"blob_id": "93eafb5b23bac513fc5dcc177a4e8a080b2a49b4",
"index": 9054,
"step-1": "#-*-coding:utf-8 -*-\n\nimport subprocess\n\ndef get_audio(text):\n stat = subprocess.call(['./tts', text])\n \n if stat == 0:\n return \"Success\"\n else:\n print \"Failed\"\n\nif __name__ == '__main__':\n text = \"我是聊天机器人\"\n get_audio(text)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.conf import settings
from django.db import models
def get_image_filename(instance, filename):
a = f'post_images/{instance.post.title}.svg'
return a
def get_main_image_filename(instance, filename):
a = f'post_images/{instance.title}_main.svg'
return a
# Create your models here.
class Posts(models.Model):
PYEONG_CHOICE_FIELD = (
('1-7', '1-7평'),
('8-15', '8-15평'),
('16-25', '16-25평'),
('26-', '그 이상'),
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
title = models.TextField(
'제목', max_length=50
)
content = models.TextField(
'작성 글', max_length=500
)
main_image = models.ImageField(
upload_to=get_main_image_filename,
blank=True,
null=True,
verbose_name='메인 이미지',
)
pyeong = models.ManyToManyField(
'Pyeong',
blank=True,
)
created_at = models.DateTimeField(
'생성 날짜', auto_now_add=True,
)
updated_at = models.DateTimeField(
verbose_name='수정 날짜', auto_now=True, null=True, blank=True
)
like_users = models.ManyToManyField(
'members.Users',
through='PostLike',
related_name='like_posts',
related_query_name='like_post',
blank=True,
)
colors = models.ManyToManyField(
'posts.Colors',
blank=True,
)
housingtype = models.ManyToManyField(
'HousingTypes',
blank=True,
)
style = models.ManyToManyField(
'Styles',
blank=True,
)
postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)
@staticmethod
def initial_setting():
Pyeong.make_pyeng()
Colors.make_color()
HousingTypes.make_housing_type()
Styles.make_style()
class Meta:
verbose_name = '게시글'
verbose_name_plural = '게시글 목록'
def __str__(self):
return '%s : %s' % (self.pk, self.title)
class Comments(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
verbose_name='포스트',
related_name='comment_set',
related_query_name='comments',
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
content = models.TextField(
'댓글 내용', max_length=500
)
# 글쓴이
created_at = models.DateTimeField(
'작성 날', auto_now_add=True,
)
updated_at = models.DateTimeField(
'수정 날짜', auto_now=True,
)
class Meta:
verbose_name = '댓글'
verbose_name_plural = '댓글 목록'
def save(self, *args, **kwargs):
# 여기서 이미지 처리를 하게 될 듯
super().save(*args, **kwargs)
class PostLike(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
)
user = models.ForeignKey(
'members.Users',
on_delete=models.CASCADE,
)
created_at = models.DateTimeField(
auto_now_add=True,
)
def __str__(self):
return 'Post[{post_pk}] Like (User: {username})'.format(
post_pk=self.post.pk,
username=self.user.username,
)
class Meta:
verbose_name = '게시글 좋아요'
verbose_name_plural = f'{verbose_name} 목록'
# 특정 유저가 특정 포스트 좋아요를 누른 정보는 유니크 해야 함.
unique_together = (
('post', 'user'),
)
class Pyeong(models.Model):
type = models.CharField(
'평 수',
max_length=20,
)
@staticmethod
def make_pyeng():
index_list = ['1-7', '8-15', '16-25', '그 이상']
for i in range((len(index_list))):
Pyeong.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
type = models.CharField(
'주거 환경',
max_length=20,
)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField(
'디자인 스타일',
max_length=10,
)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스', '로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField(
'색상',
max_length=10
)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색', '회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
)
image = models.ImageField(
upload_to=get_image_filename,
verbose_name='다중 이미지',
)
image_comment = models.TextField(
'사진 설명', max_length=200, blank=True, null=True,
)
# 이미지 추가 스택오버플로우 정보
# https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django
|
normal
|
{
"blob_id": "1bbadf02c4b9ca22a0099bcc09fa4c62c9901c39",
"index": 1069,
"step-1": "<mask token>\n\n\nclass Styles(models.Model):\n <mask token>\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-2": "<mask token>\n\n\nclass Pyeong(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-3": "<mask token>\n\n\nclass Comments(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n 
Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-4": "<mask token>\n\n\nclass Posts(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', 
max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-5": "from django.conf import settings\nfrom django.db import models\n\n\ndef get_image_filename(instance, filename):\n a = f'post_images/{instance.post.title}.svg'\n return a\n\n\ndef get_main_image_filename(instance, filename):\n a = f'post_images/{instance.title}_main.svg'\n return a\n\n\n# Create your models here.\nclass Posts(models.Model):\n PYEONG_CHOICE_FIELD = (\n ('1-7', '1-7평'),\n ('8-15', '8-15평'),\n ('16-25', '16-25평'),\n ('26-', '그 이상'),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n title = models.TextField(\n '제목', max_length=50\n )\n content = models.TextField(\n '작성 글', max_length=500\n )\n main_image = models.ImageField(\n upload_to=get_main_image_filename,\n blank=True,\n null=True,\n verbose_name='메인 이미지',\n )\n pyeong = models.ManyToManyField(\n 'Pyeong',\n blank=True,\n )\n created_at = models.DateTimeField(\n '생성 날짜', auto_now_add=True,\n )\n updated_at = models.DateTimeField(\n verbose_name='수정 날짜', auto_now=True, null=True, blank=True\n )\n\n like_users = models.ManyToManyField(\n 'members.Users',\n through='PostLike',\n related_name='like_posts',\n related_query_name='like_post',\n blank=True,\n )\n\n colors = models.ManyToManyField(\n 'posts.Colors',\n blank=True,\n )\n housingtype = models.ManyToManyField(\n 'HousingTypes',\n blank=True,\n )\n style = models.ManyToManyField(\n 'Styles',\n blank=True,\n )\n postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n verbose_name='포스트',\n related_name='comment_set',\n related_query_name='comments',\n )\n author = models.ForeignKey(\n 
settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n content = models.TextField(\n '댓글 내용', max_length=500\n )\n # 글쓴이\n created_at = models.DateTimeField(\n '작성 날', auto_now_add=True,\n )\n updated_at = models.DateTimeField(\n '수정 날짜', auto_now=True,\n )\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n # 여기서 이미지 처리를 하게 될 듯\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n )\n user = models.ForeignKey(\n 'members.Users',\n on_delete=models.CASCADE,\n )\n created_at = models.DateTimeField(\n auto_now_add=True,\n )\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(\n post_pk=self.post.pk,\n username=self.user.username,\n )\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n # 특정 유저가 특정 포스트 좋아요를 누른 정보는 유니크 해야 함.\n unique_together = (\n ('post', 'user'),\n )\n\n\nclass Pyeong(models.Model):\n type = models.CharField(\n '평 수',\n max_length=20,\n )\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range((len(index_list))):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField(\n '주거 환경',\n max_length=20,\n )\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField(\n '디자인 스타일',\n max_length=10,\n )\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스', '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % 
(self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField(\n '색상',\n max_length=10\n )\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색', '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n )\n image = models.ImageField(\n upload_to=get_image_filename,\n verbose_name='다중 이미지',\n )\n image_comment = models.TextField(\n '사진 설명', max_length=200, blank=True, null=True,\n )\n # 이미지 추가 스택오버플로우 정보\n # https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django\n",
"step-ids": [
9,
15,
23,
27,
32
]
}
|
[
9,
15,
23,
27,
32
] |
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def showHomepage():
return render_template('home.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def showHomepage():
return render_template('home.html')
if __name__ == '__main__':
print('app started')
app.secret_key = 'secretkey'
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/', methods=['GET'])
def showHomepage():
return render_template('home.html')
if __name__ == '__main__':
print('app started')
app.secret_key = 'secretkey'
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, render_template, request, url_for, redirect, flash, jsonify
app = Flask(__name__)
@app.route('/', methods=['GET'])
def showHomepage():
return render_template('home.html')
if __name__ == '__main__':
print('app started')
app.secret_key = 'secretkey'
app.run(debug=True)
<|reserved_special_token_1|>
from flask import (Flask,
render_template,
request,
url_for,
redirect,
flash,
jsonify)
app = Flask(__name__)
@app.route('/', methods=['GET'])
def showHomepage():
return render_template('home.html')
if __name__ == '__main__':
print('app started')
app.secret_key = 'secretkey'
app.run(debug=True)
|
flexible
|
{
"blob_id": "3001534be3364be1148cd51a4a943fd8c975d87e",
"index": 8384,
"step-1": "<mask token>\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n print('app started')\n app.secret_key = 'secretkey'\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n print('app started')\n app.secret_key = 'secretkey'\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template, request, url_for, redirect, flash, jsonify\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n print('app started')\n app.secret_key = 'secretkey'\n app.run(debug=True)\n",
"step-5": "from flask import (Flask,\n\trender_template,\n\trequest,\n\turl_for,\n\tredirect,\n\tflash,\n\tjsonify)\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n\treturn render_template('home.html')\n\n\nif __name__ == '__main__':\n\tprint('app started')\n\tapp.secret_key = 'secretkey'\n\tapp.run(debug=True)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ic(compare('tranpsosed', 'transposed'))
print(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')
ic(compare_info('momther', 'mother'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
start = perf_counter_ns()
ic(compare('tranpsosed', 'transposed'))
print(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')
ic(compare_info('momther', 'mother'))
<|reserved_special_token_1|>
from time import perf_counter_ns
from anthony.utility.distance import compare, compare_info
from icecream import ic
start = perf_counter_ns()
ic(compare('tranpsosed', 'transposed'))
print(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')
ic(compare_info('momther', 'mother'))
<|reserved_special_token_1|>
from time import perf_counter_ns
from anthony.utility.distance import compare, compare_info
from icecream import ic
start = perf_counter_ns()
ic(compare("tranpsosed", "transposed"))
print(f"Example Time: {(perf_counter_ns() - start)/1e+9} Seconds")
ic(compare_info("momther", "mother"))
|
flexible
|
{
"blob_id": "98b0e42f3ed1a234f63c4d3aa76ceb9fce7c041d",
"index": 3631,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nic(compare('tranpsosed', 'transposed'))\nprint(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')\nic(compare_info('momther', 'mother'))\n",
"step-3": "<mask token>\nstart = perf_counter_ns()\nic(compare('tranpsosed', 'transposed'))\nprint(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')\nic(compare_info('momther', 'mother'))\n",
"step-4": "from time import perf_counter_ns\nfrom anthony.utility.distance import compare, compare_info\nfrom icecream import ic\nstart = perf_counter_ns()\nic(compare('tranpsosed', 'transposed'))\nprint(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')\nic(compare_info('momther', 'mother'))\n",
"step-5": "from time import perf_counter_ns\n\nfrom anthony.utility.distance import compare, compare_info\nfrom icecream import ic\n\nstart = perf_counter_ns()\nic(compare(\"tranpsosed\", \"transposed\"))\nprint(f\"Example Time: {(perf_counter_ns() - start)/1e+9} Seconds\")\n\nic(compare_info(\"momther\", \"mother\"))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
__author__ = 'samar'
import mv_details
import product
|
normal
|
{
"blob_id": "7ac53779a98b6e4b236b1e81742163d2c610a274",
"index": 4556,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'samar'\n<mask token>\n",
"step-3": "__author__ = 'samar'\nimport mv_details\nimport product\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# encoding: utf-8
import tweepy #https://github.com/tweepy/tweepy
import csv
import scraperwiki
import json
#Twitter API credentials - these need adding
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name):
#Twitter only allows access to a users most recent 3240 tweets with this method
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
#Better to be api = tweepy.API(auth, parser=JSONParser())
#See http://stackoverflow.com/questions/14856526/parsing-twitter-json-object-in-python
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name = screen_name,count=200)
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print "oldest: ", oldest
print "alltweets[0]: ", alltweets[0]
#Converts first tweet to text
#see http://stackoverflow.com/questions/27900451/convert-tweepy-status-object-into-json
json_str = json.dumps(alltweets[0]._json)
#CONVERT TO LOOP TO DO SAME TO ALL TWEETS
record = {}
print "len(alltweets)", len(alltweets)
for tweet in alltweets:
print "type(tweet)", type(tweet)
json_str = json.dumps(tweet._json)
print "type(tweet) 2", type(json_str)
print "json_str:", json_str
#Split tweet on commas to create an array
tweetarray = json_str.split(', "')
#tweetid2 = json_str.split('/status/')[1].split('/')[0]
tweetid = json_str.split('"id": ')[1].split(',')[0]
tweettxt = json_str.split('"text": ')[1].split(', "is_quote_status"')[0]
tweetdate = json_str.split('"created_at": "')[2].split('", "')[0]
name = json_str.split('"name": "')[1].split('", "')[0]
screenname = json_str.split('"screen_name": "')[1].split('", "')[0]
tweeturl = "https://twitter.com/"+screenname+"/status/"+tweetid
record['tweetid'] = tweetid
record['tweettxt'] = tweettxt
record['tweetdate'] = tweetdate
record['name'] = name
record['screenname'] = screenname#
record['tweeturl'] = tweeturl
print "record: ", record
scraperwiki.sql.save(['tweetid'], record)
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print "getting tweets before %s" % (oldest)
#all subsiquent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
#save most recent tweetss
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print "...%s tweets downloaded so far" % (len(alltweets))
#transform the tweepy tweets into a 2D array that will populate the csv
#outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]
#need to convert to a dict before saving - could do it in the loop above rather than at end
for tweet in alltweets:
print "type(tweet)", type(tweet)
json_str = json.dumps(tweet._json)
print "type(tweet) 2", type(json_str)
print "json_str:", json_str
tweetarray = json_str.split(', "')
tweetid = json_str.split('"id": ')[1].split(',')[0]
tweettxt = json_str.split('"text": ')[1].split(', "is_quote_status"')[0]
tweetdate = json_str.split('"created_at": "')[2].split('", "')[0]
name = json_str.split('"name": "')[1].split('", "')[0]
screenname = json_str.split('"screen_name": "')[1].split('", "')[0]
tweeturl = "https://twitter.com/"+screenname+"/status/"+tweetid
record['tweetid'] = tweetid
record['tweettxt'] = tweettxt
record['tweetdate'] = tweetdate
record['name'] = name
record['screenname'] = screenname#
record['tweeturl'] = tweeturl
print "record: ", record
scraperwiki.sql.save(['tweetid'], record)
#Add names as strings to this list
accountslist = []
for account in accountslist:
if __name__ == '__main__':
#pass in the username of the account you want to download
get_all_tweets(account)
|
normal
|
{
"blob_id": "02230b44568808757fe45fd18d28881d9bc3e410",
"index": 8074,
"step-1": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport tweepy #https://github.com/tweepy/tweepy\nimport csv\nimport scraperwiki\nimport json\n\n#Twitter API credentials - these need adding\nconsumer_key = \"\"\nconsumer_secret = \"\"\naccess_key = \"\"\naccess_secret = \"\"\n\n\ndef get_all_tweets(screen_name):\n\t#Twitter only allows access to a users most recent 3240 tweets with this method\n\t\n\t#authorize twitter, initialize tweepy\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_key, access_secret)\n\tapi = tweepy.API(auth)\n\t#Better to be api = tweepy.API(auth, parser=JSONParser())\n\t#See http://stackoverflow.com/questions/14856526/parsing-twitter-json-object-in-python\n\t\n\t#initialize a list to hold all the tweepy Tweets\n\talltweets = []\t\n\t\n\t#make initial request for most recent tweets (200 is the maximum allowed count)\n\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200)\n\t\n\t#save most recent tweets\n\talltweets.extend(new_tweets)\n\t\n\t#save the id of the oldest tweet less one\n\toldest = alltweets[-1].id - 1\n\tprint \"oldest: \", oldest\n\tprint \"alltweets[0]: \", alltweets[0]\n\t#Converts first tweet to text\n\t#see http://stackoverflow.com/questions/27900451/convert-tweepy-status-object-into-json\n\tjson_str = json.dumps(alltweets[0]._json)\n\t#CONVERT TO LOOP TO DO SAME TO ALL TWEETS\n\trecord = {}\n\tprint \"len(alltweets)\", len(alltweets)\n\tfor tweet in alltweets:\n\t print \"type(tweet)\", type(tweet)\n\t json_str = json.dumps(tweet._json)\n\t print \"type(tweet) 2\", type(json_str)\n\t print \"json_str:\", json_str\n\t #Split tweet on commas to create an array\n\t tweetarray = json_str.split(', \"')\n\t #tweetid2 = json_str.split('/status/')[1].split('/')[0]\n\t tweetid = json_str.split('\"id\": ')[1].split(',')[0]\n\t tweettxt = json_str.split('\"text\": ')[1].split(', \"is_quote_status\"')[0]\n\t tweetdate = json_str.split('\"created_at\": \"')[2].split('\", 
\"')[0]\n\t name = json_str.split('\"name\": \"')[1].split('\", \"')[0]\n\t screenname = json_str.split('\"screen_name\": \"')[1].split('\", \"')[0]\n\t tweeturl = \"https://twitter.com/\"+screenname+\"/status/\"+tweetid\n\t record['tweetid'] = tweetid\n\t record['tweettxt'] = tweettxt\n\t record['tweetdate'] = tweetdate\n\t record['name'] = name\n\t record['screenname'] = screenname#\n\t record['tweeturl'] = tweeturl\n\t print \"record: \", record\n\t scraperwiki.sql.save(['tweetid'], record)\n #keep grabbing tweets until there are no tweets left to grab\n\n\twhile len(new_tweets) > 0:\n\t\tprint \"getting tweets before %s\" % (oldest)\n\t\t\n\t\t#all subsiquent requests use the max_id param to prevent duplicates\n\t\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)\n\t\t\n\t\t#save most recent tweetss\n\t\talltweets.extend(new_tweets)\n\t\t\n\t\t#update the id of the oldest tweet less one\n\t\toldest = alltweets[-1].id - 1\n\t\t\n\t\tprint \"...%s tweets downloaded so far\" % (len(alltweets))\n\t\t#transform the tweepy tweets into a 2D array that will populate the csv\t\n\t\t#outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\")] for tweet in alltweets]\n\t\t#need to convert to a dict before saving - could do it in the loop above rather than at end\n\t\tfor tweet in alltweets:\n\t\t print \"type(tweet)\", type(tweet)\n\t\t json_str = json.dumps(tweet._json)\n\t\t print \"type(tweet) 2\", type(json_str)\n\t\t print \"json_str:\", json_str\n\t\t tweetarray = json_str.split(', \"')\n\t\t tweetid = json_str.split('\"id\": ')[1].split(',')[0]\n\t\t tweettxt = json_str.split('\"text\": ')[1].split(', \"is_quote_status\"')[0]\n\t\t tweetdate = json_str.split('\"created_at\": \"')[2].split('\", \"')[0]\n\t\t name = json_str.split('\"name\": \"')[1].split('\", \"')[0]\n\t\t screenname = json_str.split('\"screen_name\": \"')[1].split('\", \"')[0]\n\t\t tweeturl = 
\"https://twitter.com/\"+screenname+\"/status/\"+tweetid\n\t\t record['tweetid'] = tweetid\n\t\t record['tweettxt'] = tweettxt\n\t\t record['tweetdate'] = tweetdate\n\t\t record['name'] = name\n\t\t record['screenname'] = screenname#\n\t\t record['tweeturl'] = tweeturl\n\t\t print \"record: \", record\n\t\t scraperwiki.sql.save(['tweetid'], record)\n\n#Add names as strings to this list\naccountslist = [] \nfor account in accountslist:\n if __name__ == '__main__':\n \t#pass in the username of the account you want to download\n \tget_all_tweets(account)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
<|reserved_special_token_0|>
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
def encoding_doc(token, words):
return token.texts_to_sequences(words)
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
<|reserved_special_token_0|>
displaySubjectVerbObject(list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
nlp = spacy.load('en_core_web_sm')
text = input('Enter the text to find the triplet: ')
str = nlp(text)
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
def encoding_doc(token, words):
return token.texts_to_sequences(words)
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
tuple_data = findTriplets(str)
list = creatingLists(tuple_data)
displaySubjectVerbObject(list)
<|reserved_special_token_1|>
import nltk
import spacy
import textacy
from keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from nltk import word_tokenize, re
from rasa import model
import pandas as pd
from spacy import lemmatizer
nlp = spacy.load('en_core_web_sm')
text = input('Enter the text to find the triplet: ')
str = nlp(text)
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
def encoding_doc(token, words):
return token.texts_to_sequences(words)
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
tuple_data = findTriplets(str)
list = creatingLists(tuple_data)
displaySubjectVerbObject(list)
<|reserved_special_token_1|>
import nltk
import spacy
import textacy
from keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from nltk import word_tokenize, re
from rasa import model
import pandas as pd
from spacy import lemmatizer
nlp = spacy.load('en_core_web_sm')
text=input("Enter the text to find the triplet: ")
str=nlp(text)
def load_dataset(filename):
df = pd.read_csv(filename, encoding="latin1",
names=["Sentence", "Intent"])
intent = df["Intent"]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = nltk.word_tokenize(clean)
# lemmatizing
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words,
filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def max_length(words):
return(len(max(words, key = len)))
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
def findTriplets(str):
tuple_data=textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists=list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
tuple_data=findTriplets(str)
list=creatingLists(tuple_data)
displaySubjectVerbObject(list)
|
flexible
|
{
"blob_id": "707855a4e07b68d9ae97c2e1dc8bfd52f11c314c",
"index": 1812,
"step-1": "<mask token>\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\n<mask token>\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\n<mask token>\ndisplaySubjectVerbObject(list)\n",
"step-3": "<mask token>\nnlp = spacy.load('en_core_web_sm')\ntext = input('Enter the text to find the triplet: ')\nstr = nlp(text)\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data = findTriplets(str)\nlist = creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)\n",
"step-4": "import nltk\nimport spacy\nimport textacy\nfrom keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.text import Tokenizer\nfrom nltk import word_tokenize, re\nfrom rasa import model\nimport pandas as pd\nfrom spacy import lemmatizer\nnlp = spacy.load('en_core_web_sm')\ntext = input('Enter the text to find the triplet: ')\nstr = nlp(text)\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data = findTriplets(str)\nlist = creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)\n",
"step-5": "import nltk\nimport spacy\nimport textacy\nfrom keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.text import Tokenizer\nfrom nltk import word_tokenize, re\nfrom rasa import model\nimport pandas as pd\nfrom spacy import lemmatizer\n\nnlp = spacy.load('en_core_web_sm')\n\ntext=input(\"Enter the text to find the triplet: \")\nstr=nlp(text)\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding=\"latin1\",\n names=[\"Sentence\", \"Intent\"])\n intent = df[\"Intent\"]\n unique_intent = list(set(intent))\n sentences = list(df[\"Sentence\"])\n\n return (intent, unique_intent, sentences)\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub(r'[^ a-z A-Z 0-9]', \" \", s)\n w = nltk.word_tokenize(clean)\n # lemmatizing\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n\n\n return words\n\ndef create_tokenizer(words,\n filters = '!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):\n token = Tokenizer(filters = filters)\n token.fit_on_texts(words)\n return token\ndef max_length(words):\n return(len(max(words, key = len)))\ndef encoding_doc(token, words):\n return(token.texts_to_sequences(words))\n\ndef findTriplets(str):\n tuple_data=textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\ndef creatingLists(tuple_data):\n tuple_to_lists=list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data=findTriplets(str)\nlist=creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
#!/usr/bin/python
###########################################################################
#
# Copyright 2019 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import sys
import time
import json
import ast
import cli_client as cc
from rpipe_utils import pipestr
from scripts.render_cli import show_cli_output
def prompt(msg):
prompt_msg = msg + " [confirm y/N]: "
x = raw_input(prompt_msg)
while x.lower() != "y" and x.lower() != "n":
print ("Invalid input, expected [y/N]")
x = raw_input(prompt_msg)
if x.lower() == "n":
exit(0)
def invoke(func, args):
body = None
aa = cc.ApiClient()
if func == 'rpc_sonic_interface_clear_counters':
keypath = cc.Path('/restconf/operations/sonic-interface:clear_counters')
body = {"sonic-interface:input":{"interface-param":args[0]}}
if args[0] == "all":
prompt("Clear all Interface counters")
elif args[0] == "PortChannel":
prompt("Clear all PortChannel interface counters")
elif args[0] == "Ethernet":
prompt("Clear all Ethernet interface counters")
else:
prompt("Clear counters for " + args[0])
return aa.post(keypath, body)
else:
return
def run(func, args):
try:
api_response = invoke(func,args)
status = api_response.content["sonic-interface:output"]
if status["status"] != 0:
print status["status-detail"]
# prompt() returns SystemExit exception when exit() is called
except SystemExit:
return
except:
print "%Error: Transaction Failure"
return
if __name__ == '__main__':
pipestr().write(sys.argv)
run(sys.argv[1], sys.argv[2:])
|
normal
|
{
"blob_id": "102ba5c1cb4beda6f9b82d37d9b343fe4f309cfb",
"index": 5268,
"step-1": "#!/usr/bin/python\n###########################################################################\n#\n# Copyright 2019 Dell, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n###########################################################################\n\nimport sys\nimport time\nimport json\nimport ast\nimport cli_client as cc\nfrom rpipe_utils import pipestr\nfrom scripts.render_cli import show_cli_output\n\ndef prompt(msg):\n prompt_msg = msg + \" [confirm y/N]: \"\n x = raw_input(prompt_msg)\n while x.lower() != \"y\" and x.lower() != \"n\":\n print (\"Invalid input, expected [y/N]\")\n x = raw_input(prompt_msg)\n if x.lower() == \"n\":\n exit(0)\n\ndef invoke(func, args):\n body = None\n aa = cc.ApiClient()\n if func == 'rpc_sonic_interface_clear_counters':\n keypath = cc.Path('/restconf/operations/sonic-interface:clear_counters')\n body = {\"sonic-interface:input\":{\"interface-param\":args[0]}}\n if args[0] == \"all\":\n prompt(\"Clear all Interface counters\")\n elif args[0] == \"PortChannel\":\n prompt(\"Clear all PortChannel interface counters\")\n elif args[0] == \"Ethernet\":\n prompt(\"Clear all Ethernet interface counters\")\n else:\n prompt(\"Clear counters for \" + args[0])\n return aa.post(keypath, body)\n else:\n return \n\ndef run(func, args):\n try:\n api_response = invoke(func,args)\n status = api_response.content[\"sonic-interface:output\"]\n if status[\"status\"] != 0:\n print status[\"status-detail\"]\n # prompt() returns 
SystemExit exception when exit() is called\n except SystemExit:\n return\n except:\n print \"%Error: Transaction Failure\"\n return\n\n\nif __name__ == '__main__':\n pipestr().write(sys.argv)\n run(sys.argv[1], sys.argv[2:])\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
<|reserved_special_token_0|>
def clean(self):
cleaned_data = super().clean()
if cleaned_data['img'] and cleaned_data['urlImg']:
raise forms.ValidationError(
'Должно быть заполнено только одно из полей')
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['img'] and cleaned_data['urlImg']:
raise forms.ValidationError(
'Должно быть заполнено только одно из полей')
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
<|reserved_special_token_1|>
from django import forms
from .models import Picture
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['img'] and cleaned_data['urlImg']:
raise forms.ValidationError(
'Должно быть заполнено только одно из полей')
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
<|reserved_special_token_1|>
from django import forms
from .models import Picture
class PictureUploadForm(forms.ModelForm):
    """ModelForm for uploading a Picture either as a file ('img') or by
    URL ('urlImg'); exactly one of the two must be provided."""

    class Meta:
        model = Picture
        exclude = ()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap styling and suppress help texts on every field.
        for field_name, field in self.fields.items():
            field.widget.attrs['class'] = 'form-control text-center'
            field.help_text = ''

    def clean(self):
        cleaned_data = super().clean()
        # Use .get(): a field that failed its own validation is absent from
        # cleaned_data, so subscripting would raise KeyError instead of
        # reporting a normal form error.
        if cleaned_data.get('img') and cleaned_data.get('urlImg'):
            raise forms.ValidationError(
                'Должно быть заполнено только одно из полей')
        return cleaned_data
class PictureUpdateForm(forms.Form):
    """Form for editing a picture's dimensions and size; all three values
    must be >= 1."""
    width = forms.IntegerField()
    height = forms.IntegerField()
    size = forms.FloatField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap styling and suppress help texts on every field.
        for field_name, field in self.fields.items():
            field.widget.attrs['class'] = 'form-control text-center'
            field.help_text = ''

    def clean(self):
        cleaned_data = super().clean()
        width = cleaned_data.get('width')
        height = cleaned_data.get('height')
        size = cleaned_data.get('size')
        # Fields that failed their own validation are absent from
        # cleaned_data; only apply the cross-field rule when all values are
        # present, instead of crashing with KeyError.
        if None not in (width, height, size) and (
                width < 1 or height < 1 or size < 1):
            raise forms.ValidationError('Значения в полях должны быть больше 0')
        return cleaned_data
|
flexible
|
{
"blob_id": "3d45fd7dcb3b382efaefe2797ebeb33216a840fa",
"index": 680,
"step-1": "<mask token>\n\n\nclass PictureUploadForm(forms.ModelForm):\n\n\n class Meta:\n model = Picture\n exclude = ()\n <mask token>\n <mask token>\n\n\nclass PictureUpdateForm(forms.Form):\n width = forms.IntegerField()\n height = forms.IntegerField()\n size = forms.FloatField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['width'] < 1 or cleaned_data['height'\n ] < 1 or cleaned_data['size'] < 1:\n raise forms.ValidationError('Значения в полях должны быть больше 0'\n )\n",
"step-2": "<mask token>\n\n\nclass PictureUploadForm(forms.ModelForm):\n\n\n class Meta:\n model = Picture\n exclude = ()\n <mask token>\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['img'] and cleaned_data['urlImg']:\n raise forms.ValidationError(\n 'Должно быть заполнено только одно из полей')\n\n\nclass PictureUpdateForm(forms.Form):\n width = forms.IntegerField()\n height = forms.IntegerField()\n size = forms.FloatField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['width'] < 1 or cleaned_data['height'\n ] < 1 or cleaned_data['size'] < 1:\n raise forms.ValidationError('Значения в полях должны быть больше 0'\n )\n",
"step-3": "<mask token>\n\n\nclass PictureUploadForm(forms.ModelForm):\n\n\n class Meta:\n model = Picture\n exclude = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['img'] and cleaned_data['urlImg']:\n raise forms.ValidationError(\n 'Должно быть заполнено только одно из полей')\n\n\nclass PictureUpdateForm(forms.Form):\n width = forms.IntegerField()\n height = forms.IntegerField()\n size = forms.FloatField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['width'] < 1 or cleaned_data['height'\n ] < 1 or cleaned_data['size'] < 1:\n raise forms.ValidationError('Значения в полях должны быть больше 0'\n )\n",
"step-4": "from django import forms\nfrom .models import Picture\n\n\nclass PictureUploadForm(forms.ModelForm):\n\n\n class Meta:\n model = Picture\n exclude = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['img'] and cleaned_data['urlImg']:\n raise forms.ValidationError(\n 'Должно быть заполнено только одно из полей')\n\n\nclass PictureUpdateForm(forms.Form):\n width = forms.IntegerField()\n height = forms.IntegerField()\n size = forms.FloatField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['width'] < 1 or cleaned_data['height'\n ] < 1 or cleaned_data['size'] < 1:\n raise forms.ValidationError('Значения в полях должны быть больше 0'\n )\n",
"step-5": "from django import forms\nfrom .models import Picture\n\n\nclass PictureUploadForm(forms.ModelForm):\n\n class Meta:\n model = Picture\n exclude = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['img'] and cleaned_data['urlImg']:\n raise forms.ValidationError(\"Должно быть заполнено только одно из полей\")\n\n\nclass PictureUpdateForm(forms.Form):\n\n width = forms.IntegerField()\n height = forms.IntegerField()\n size = forms.FloatField()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control text-center'\n field.help_text = ''\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data['width'] < 1 or cleaned_data['height'] < 1 or cleaned_data['size'] < 1:\n raise forms.ValidationError('Значения в полях должны быть больше 0')",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Local path to the tweet data exported in the Twitter archive download.
SOURCE_FILE = "D:\\temp\\twitter\\tweet.js"
TWITTER_USERNAME = 'roytang'
# Tags added to every imported tweet.
auto_tags = ["mtg"]
# Tweet "source" app names that indicate content cross-posted from elsewhere.
syndicated_sources = ["IFTTT", "Tumblr", "instagram.com", "Mailchimp", "Twitter Web", "TweetDeck", "mtgstorm"]
# When set, only the tweet with this id_str is processed (debugging aid).
debug_id = None
# debug_id = "11143081155"
import frontmatter
import json
import requests
import urllib.request
from urllib.parse import urlparse, parse_qs, urldefrag
from urllib.error import HTTPError
import sys
from pathlib import Path
import os, shutil
import inspect
from datetime import datetime
import re
from utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder
cwd = Path.cwd()
# Hugo content tree that generated posts are written into.
contentdir = cwd / "content"
blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
# Directory holding the archive's downloaded tweet media files.
mediadir = Path("D:\\temp\\roy_mtg-twitter\\tweet_media")
# id_str -> retweet metadata loaded from retweets.json (used by create_post).
retweetscache = load_map_from_json("d:\\temp\\twitter\\retweets.json")
# Shared resolver that expands shortened urls (caches results; see save_cache).
resolver = URLResolver()
def loadurlmap(cleanupdupes=False):
    """Build a lookup map from the Hugo site's urlmap.json.

    Keys include each post's own url/path, every syndicated url recorded on
    a post, and the post title (when non-empty); values are the post-info
    dicts straight from urlmap.json.  When *cleanupdupes* is True, posts
    that share a syndicated url are reconciled: one canonical entry is kept
    and the duplicate source files are deleted from contentdir.

    NOTE(review): this definition shadows the `loadurlmap` imported from
    utils above — confirm the local version is the intended one.
    """
    blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
    urlmapfile = blogdir / "urlmap.json"
    urlmap = {}
    urlmapdupes = {}
    with urlmapfile.open(encoding="UTF-8") as f:
        tempurlmap = json.loads(f.read())
        for u in tempurlmap:
            u1 = tempurlmap[u]
            if "syndicated" in u1:
                for s in u1['syndicated']:
                    if 'url' in s:
                        su = s['url']
                        if su in urlmap:
                            # we expect syndicated urls to be unique,
                            # so if it's already in the map,
                            # it must be a dupe
                            # (This is really just to clean up my own mess!)
                            if su not in urlmapdupes:
                                urlmapdupes[su] = [u1, urlmap[su]]
                            else:
                                urlmapdupes[su].append(u1)
                        else:
                            urlmap[su] = u1
            urlmap[u] = u1
            title = u1.get("title", "").strip()
            if len(title) > 0:
                urlmap[title] = u1
    if cleanupdupes:
        # clean up any found dupes by syndicated url
        for su in urlmapdupes:
            dupes = urlmapdupes[su]
            canonical = None
            for_deletion = []
            for d in dupes:
                # a post/link entry, or one with 3+ syndications, is treated
                # as the canonical copy; everything else gets deleted
                if d["source_path"].startswith("post") or d["source_path"].startswith("links") or len(d['syndicated']) > 2:
                    if canonical is not None:
                        print("\n\r##### WTH. More than one canonical urls were detected for %s" % (su))
                        print(json.dumps(dupes, indent=4))
                    canonical = d
                else:
                    for_deletion.append(d)
            if canonical is None:
                print("##### Dupes were detected for %s but no canonical url found!" % (su))
                print(dupes)
            else:
                urlmap[su] = canonical
                for d in for_deletion:
                    source_path = Path(d['source_path'])
                    full_path = contentdir / source_path
                    if full_path.exists():
                        os.remove(str(full_path))
    return urlmap
urlmap = loadurlmap(False)
def is_number(s):
    """Return True when *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def add_syndication(mdfile, url, stype):
    """Record a syndication entry {type: stype, url: url} in the frontmatter
    of the markdown file *mdfile*, skipping exact duplicates.

    Parse failures are reported and the file is left untouched.
    """
    with mdfile.open(encoding="UTF-8") as f:
        try:
            post = frontmatter.load(f)
        # Catch Exception rather than a bare except: a bare except also
        # swallows SystemExit/KeyboardInterrupt.
        except Exception:
            print("Error parsing file")
            return
    if post.get('syndicated') is None:
        post['syndicated'] = []
    else:
        for s in post['syndicated']:
            if s["type"] == stype and s["url"] == url:
                # dont add a duplicate!
                return
    post['syndicated'].append({
        'type': stype,
        'url': url
    })
    newfile = frontmatter.dumps(post)
    with mdfile.open("w", encoding="UTF-8") as w:
        w.write(newfile)
def get_content(t):
    """Turn tweet dict *t*'s full_text into post content: @mentions become
    markdown links, and t.co / shortened urls are expanded to their final
    targets via the module-level URLResolver."""
    content = t['full_text']
    if "entities" in t:
        # get raw urls in the text
        raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)
        # replace mentions with link
        for m in t["entities"]["user_mentions"]:
            screen_name = m["screen_name"]
            # replace with markdown link
            mdlink = "[@%s](https://twitter.com/%s/)" % (screen_name, screen_name)
            content = content.replace("@"+screen_name, mdlink)
        processed_urls = []
        # clean urls
        for u in t["entities"]["urls"]:
            url = u["url"]
            processed_urls.append(url)
            expanded_url = u["expanded_url"]
            processed_urls.append(expanded_url)
            # print("##### A URL!!! %s" % expanded_url)
            expanded_url, no_errors = resolver.get_final_url(expanded_url)
            processed_urls.append(expanded_url)
            content = content.replace(url, expanded_url)
        # find urls that were not in the entities (already-expanded links
        # typed directly into the tweet text)
        for raw_url in raw_urls:
            if raw_url not in processed_urls:
                expanded_url, no_errors = resolver.get_final_url(raw_url)
                content = content.replace(raw_url, expanded_url)
    return content
def create_post(t):
    """Create a markdown post (note/reply/repost/photo) from tweet dict *t*.

    Chooses the content kind from the tweet's shape, copies any media files
    into a page bundle, collects hashtags, and writes the frontmatter'd
    markdown file under contentdir. Returns True when a post was written.
    """
    id = t['id_str']
    d = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
    content = get_content(t)
    post = frontmatter.Post(content)
    post['date'] = d
    post['syndicated'] = [
        {
            "type": "twitter",
            "url": "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, t['id'])
        }
    ]
    kind = "notes"
    if "in_reply_to_status_id_str" in t and "in_reply_to_screen_name" in t:
        kind = "replies"
        post["reply_to"] = {
            "type": "twitter",
            "url": "https://twitter.com/%s/statuses/%s/" % (t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
            "name": t["in_reply_to_screen_name"],
            "label": "%s's tweet" % (t["in_reply_to_screen_name"])
        }
    elif t["full_text"].startswith("RT @"):
        rc = retweetscache.get(id)
        if rc is None:
            # RTed status is inaccessible, we'll just render it as an ordinary note
            pass
        else:
            if "retweeted_user" in rc:
                kind = "reposts"
                post['repost_source'] = {
                    "type": "twitter",
                    "name": rc["retweeted_user"],
                    "url": "https://twitter.com/%s/statuses/%s/" % (rc['retweeted_user'], rc['retweeted_id'])
                }
            else:
                # e.g. 785744070027030528: RTed status is inaccessible,
                # we'll just render it as an ordinary note
                pass
    media = []
    for m in t.get("extended_entities", {}).get("media", []):
        media.append(m["media_url_https"])
    if len(media) > 0:
        # media only upgrades a plain note to a photo post
        if kind != "reposts" and kind != "replies":
            kind = "photos"
    tags = []
    # BUGFIX: was t.get('entites', ...) — a typo, so hashtags supplied by
    # the archive's entity list were never picked up.
    for tag in t.get('entities', {}).get('hashtags', []):
        tags.append(tag['text'].lower())
    # also parse hashtags straight out of the text
    parsed_tags = re.findall(r"\s#(\w+)", " " + content)
    for tag in parsed_tags:
        if tag not in tags:
            tags.append(tag.lower())
    for tag in auto_tags:
        if tag not in tags:
            tags.append(tag)
    if len(tags) > 0:
        post["tags"] = tags
    post["source"] = "twitter"
    outdir = contentdir / kind / d.strftime("%Y") / d.strftime("%m")
    if len(media) > 0:
        # tweets with media become a page bundle named after the tweet id
        outdir = outdir / (id)
    if not outdir.exists():
        outdir.mkdir(parents=True)
    if len(media) > 0:
        outfile = outdir / ( "index.md" )
        # copy the tweet's media files into the bundle
        for imgfile in mediadir.glob(id + "*.*"):
            to_file = outdir / imgfile.name
            shutil.copy(str(imgfile), str(to_file))
    else:
        outfile = outdir / ( id + ".md" )
    newfile = frontmatter.dumps(post)
    with outfile.open("w", encoding="UTF-8") as w:
        w.write(newfile)
    return True
def process_syn_url(d1, raw_url, url):
    """Try to match tweet *d1* (which carried *url*, appearing in the text
    as *raw_url*) to an existing post in urlmap.  On a match, record the
    tweet's url as a twitter syndication on that post's markdown file and
    return True; return False when no post could be matched."""
    orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
    url, no_errors = resolver.get_final_url(url)
    if not no_errors:
        print(d1["full_text"])
    # normalize instagram / personal-account url variants before lookup
    url = url.replace("www.instagram.com", "instagram.com")
    url = url.replace("/roytang0400", "")
    url = urldefrag(url)[0]
    if url.find("instagram.com") >= 0 and url.find("?") >= 0:
        # remove utm and other misc query params from insta urls
        url = url.split("?")[0]
    if url in urlmap:
        u = urlmap[url]
        source_path = Path(u['source_path'])
        full_path = contentdir / source_path
        add_syndication(full_path, orig_tweet_url, "twitter")
        return True
    if url.find("://roytang.net") >= 0 or url.find("://mtgstorm.com") >= 0:
        # own-site links: look up by path, then fall back to matching the
        # tweet text (minus boilerplate and the url itself) against titles
        link_url = urlparse(url)
        u = urlmap.get(link_url.path, None)
        if u is None:
            # try matching by title
            title_search_term = d1["full_text"]
            title_search_term = title_search_term.replace("New blog post: ", "")
            title_search_term = title_search_term.replace("New post: ", "")
            title_search_term = title_search_term.replace(raw_url, "")
            title_search_term = title_search_term.strip()
            u = urlmap.get(title_search_term, None)
        if u is not None:
            source_path = Path(u['source_path'])
            full_path = contentdir / source_path
            add_syndication(full_path, orig_tweet_url, "twitter")
            return True
        else:
            print("######## Unmatched roytang url: %s" % (url))
            print(d1["full_text"])
            # own-site urls are reported and still treated as handled
            return True
    return False
def process_tweet(d1):
    """Process one tweet dict from the archive.

    Tweets already mapped to a post/photo are skipped; tweets whose source
    app marks them as syndicated from elsewhere are linked up via
    process_syn_url; everything else becomes a new post via create_post.
    Returns True when the tweet was handled."""
    orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
    if orig_tweet_url in urlmap:
        og = urlmap.get(orig_tweet_url)
        if og['source_path'].startswith('post\\') or og['source_path'].startswith('photos\\'):
            # no need to process further any tweets that are already mapped to a post
            return True
    tweet_source = d1["source"]
    # print("#### %s: %s" % (tweet_source, orig_tweet_url))
    # detect content syndicated from elsewhere
    # instagram, tumblr, roytang.net
    for s in syndicated_sources:
        if tweet_source.find(s) >= 0:
            for u in d1.get('entities', {}).get("urls", []):
                raw_url = u["url"]
                url = u["expanded_url"]
                if process_syn_url(d1, raw_url, url):
                    return True
                # print("######## URL = %s" % (url))
            # also process raw urls
            raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1["full_text"])
            for raw_url in raw_urls:
                if process_syn_url(d1, raw_url, raw_url):
                    return True
            break
    return create_post(d1)
def import_all():
    """One-shot import of the full tweet archive.

    Runs process_tweet over every tweet (optionally restricted by
    debug_id), then prints summary counts for anything left unhandled,
    broken down by source app, reply/retweet/media status."""
    countbysource = {}
    replies = 0
    retweets = 0
    withmedia = 0
    raw = 0
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        for d1 in d:
            if debug_id is not None and d1["id_str"] != debug_id:
                continue
            if process_tweet(d1):
                continue
            # everything below only counts tweets that were NOT handled
            tweet_source = d1["source"]
            if tweet_source not in countbysource:
                countbysource[tweet_source] = 1
            else:
                countbysource[tweet_source] = countbysource[tweet_source] + 1
            is_reply = False
            if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
                replies = replies + 1
                is_reply = True
            # handle retweet
            is_retweet = False
            content = d1["full_text"]
            if content.startswith("RT @"):
                retweets = retweets + 1
                is_retweet = True
            media = []
            if "extended_entities" in d1:
                for m in d1["extended_entities"]["media"]:
                    media.append(m["media_url_https"])
            if len(media) > 0:
                withmedia = withmedia + 1
            if not is_reply and not is_retweet and len(media) == 0:
                raw = raw + 1
            idx = idx + 1
            # if idx > 100:
            #     break
    # save the url cache for future use
    resolver.save_cache()
    for source in countbysource:
        print("countbysource: %s = %s" % (source, countbysource[source]))
    print("replies: %s" % (replies))
    print("retweets: %s" % (retweets))
    print("withmedia: %s" % (withmedia))
    print("raw: %s" % (raw))
    print("total: %s" % (idx))
def thread_replies():
    """Second pass: fold self-replies (tweet threads) into the parent
    tweet's post.

    Processes the archive in reverse (chronological) order so thread
    sequences come out in order. Each reply to my own tweet is appended to
    the parent post's content, added as a syndication, its media copied
    into the parent's bundle, and any stand-alone reply file previously
    created for it is deleted."""
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        # process in reverse order so tweet sequences are in order
        d = reversed(d)
        for d1 in d:
            is_reply = False
            if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
                is_reply = True
            if not is_reply:
                continue
            id_str = d1['id_str']
            # if id_str != "602009895437737984" and id_str != "602009747294924802":
            #     continue
            orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, id_str)
            # dont bother if already syndicated
            if orig_tweet_url in urlmap:
                continue
            date = datetime.strptime(d1['created_at'], "%a %b %d %H:%M:%S %z %Y")
            # process replies to myself
            if d1["in_reply_to_screen_name"] == TWITTER_USERNAME:
                replied_to_url = "https://twitter.com/%s/statuses/%s/" % (d1['in_reply_to_screen_name'], d1['in_reply_to_status_id_str'])
                info = urlmap[replied_to_url]
                source_path = Path(info['source_path'])
                full_path = contentdir / source_path
                # welp, we might as well move them to bundles
                if full_path.name == "index.md":
                    parentdir = full_path.parent
                else:
                    parentdir = full_path.parent / full_path.stem
                    if not parentdir.exists():
                        parentdir.mkdir(parents=True)
                    oldfile = full_path
                    full_path = parentdir / "index.md"
                    shutil.move(str(oldfile), str(full_path))
                    # also update the urlmap!
                    urlmap[replied_to_url]['source_path'] = str(full_path.relative_to(contentdir))
                # append the reply to the original post, and add it as a syndication as well
                # NOTE(review): this `as f` shadows the outer archive file
                # handle, and the bare except + return aborts the ENTIRE
                # pass on one unparsable file — `continue` was probably
                # intended; confirm before relying on this.
                with full_path.open(encoding="UTF-8") as f:
                    try:
                        post = frontmatter.load(f)
                    except:
                        print("Error parsing file")
                        return
                post['syndicated'].append({
                    'type': 'twitter',
                    'url': orig_tweet_url
                })
                content = get_content(d1)
                post.content = post.content + "\n\r" + content
                newfile = frontmatter.dumps(post)
                with full_path.open("w", encoding="UTF-8") as w:
                    w.write(newfile)
                # copy over any media from the reply as well
                media = []
                for m in d1.get("extended_entities", {}).get("media", []):
                    media.append(m["media_url_https"])
                for imgfile in mediadir.glob(d1["id_str"] + "*.*"):
                    to_file = parentdir / imgfile.name
                    shutil.copy(str(imgfile), str(to_file))
                # delete any existing file created for this reply
                oldfile = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str + ".md")
                if oldfile.exists():
                    os.remove(str(oldfile))
                oldfolder = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str)
                if oldfolder.exists():
                    shutil.rmtree(str(oldfolder))
                # replace this entry in the urlmap! this is so that succeeding replies can find the correct root tweet to attach to
                urlmap[orig_tweet_url] = info
            else:
                continue
            idx = idx + 1
    print(idx)
from utils import urlmap_to_mdfile
def cleanup_videos():
    """For every archived tweet with video media that maps to an existing
    post, delete all copied video variant files except the lowest-bitrate
    one; posts mistakenly filed under photos are moved to notes and their
    old bundle deleted."""
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        for d1 in d:
            orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1["id_str"])
            info = urlmap.get(orig_tweet_url)
            if info is None:
                continue
            for m in d1.get("extended_entities", {}).get("media", []):
                if "video_info" not in m:
                    continue
                # collect all variant urls and find the lowest-bitrate one
                videos = []
                lowest_bitrate = 1000000000000
                lowest_video = ""
                for vi in m["video_info"]["variants"]:
                    if 'bitrate' in vi:
                        videos.append(vi["url"])
                        bitrate = int(vi['bitrate'])
                        if bitrate < lowest_bitrate:
                            lowest_video = vi["url"]
                            lowest_bitrate = bitrate
                mdfile = urlmap_to_mdfile(info)
                # BUGFIX: container was previously assigned only inside the
                # "photos" branch below (which continues), so the deletion
                # loop hit a NameError — or silently reused a stale
                # directory — for non-photo posts.
                container = mdfile.parent
                if str(mdfile).find("\\photos\\") >= 0:
                    print(mdfile)
                    # move it to notes, since it's a video, not a photo
                    p = PostBuilder.from_mdfile(mdfile)
                    p.kind = "notes"
                    p.save()
                    # delete the old bundle (note: don't shadow the archive
                    # file handle `f` here)
                    for bundled in container.iterdir():
                        os.remove(str(bundled))
                    container.rmdir()
                    continue
                # delete all the video files except the lowest-bitrate one
                for v in videos:
                    if v == lowest_video:
                        continue
                    name = Path(v).name
                    if name.find("?") >= 0:
                        name = name.split("?")[0]
                    vfilename = d1["id_str"] + "-" + name
                    vfile = container / vfilename
                    print(vfile)
                    os.remove(str(vfile))
def stats():
    """Print a JSON map of tweet counts per year read from SOURCE_FILE.

    Parses each tweet's ``created_at`` timestamp (twitter archive format,
    e.g. "Wed Oct 10 20:19:24 +0000 2018") and tallies tweets by calendar
    year, then prints the tally as indented JSON.  Returns None.
    """
    # Removed dead locals (countbysource, replies, retweets, withmedia,
    # raw, idx) that were initialised but never used in this function.
    count_by_year = {}
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
    for t in d:
        dt = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
        count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1
    print(json.dumps(count_by_year, indent=2))
# Entry point: exactly one pass is run per invocation of this script.
# Uncomment the desired one-off migration pass below, or leave stats()
# active to print the per-year tweet count summary.
# thread_replies()
# import_all()
# cleanup_videos()
stats()
|
normal
|
{
"blob_id": "001d2ae89a2d008fdf6621a1be73de94c766c65f",
"index": 4570,
"step-1": "<mask token>\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return content\n\n\n<mask token>\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] 
!= debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / 'urlmap.json'\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding='UTF-8') as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if 'syndicated' in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get('title', '').strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d['source_path'].startswith('post') or d['source_path'\n ].startswith('links') or len(d['syndicated']) > 2:\n if canonical is not None:\n print(\n '\\n\\r##### WTH. More than one canonical urls were detected for %s'\n % su)\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n if canonical is None:\n print(\n '##### Dupes were detected for %s but no canonical url found!'\n % su)\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\n\n<mask token>\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s['type'] == stype and s['url'] == url:\n return\n post['syndicated'].append({'type': stype, 'url': url})\n newfile = frontmatter.dumps(post)\n with mdfile.open('w', encoding='UTF-8') as w:\n 
w.write(newfile)\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return content\n\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [{'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id'])}]\n kind = 'notes'\n if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:\n kind = 'replies'\n post['reply_to'] = {'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (t[\n 'in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n 'name': t['in_reply_to_screen_name'], 'label': \"%s's tweet\" % t\n ['in_reply_to_screen_name']}\n elif t['full_text'].startswith('RT @'):\n rc = retweetscache.get(id)\n if rc is None:\n pass\n elif 'retweeted_user' in rc:\n kind = 'reposts'\n post['repost_source'] = {'type': 'twitter', 'name': rc[\n 'retweeted_user'], 'url': \n 'https://twitter.com/%s/statuses/%s/' % (rc[\n 'retweeted_user'], rc['retweeted_id'])}\n else:\n pass\n media = []\n for m in 
t.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n if len(media) > 0:\n if kind != 'reposts' and kind != 'replies':\n kind = 'photos'\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n parsed_tags = re.findall('\\\\s#(\\\\w+)', ' ' + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post['tags'] = tags\n post['source'] = 'twitter'\n outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')\n if len(media) > 0:\n outdir = outdir / id\n if not outdir.exists():\n outdir.mkdir(parents=True)\n if len(media) > 0:\n outfile = outdir / 'index.md'\n for imgfile in mediadir.glob(id + '*.*'):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n else:\n outfile = outdir / (id + '.md')\n newfile = frontmatter.dumps(post)\n with outfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n return True\n\n\n<mask token>\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with 
Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] != debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\n<mask token>\n\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / 'urlmap.json'\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding='UTF-8') as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if 'syndicated' in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get('title', '').strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d['source_path'].startswith('post') or d['source_path'\n ].startswith('links') or len(d['syndicated']) > 2:\n if canonical is not None:\n print(\n '\\n\\r##### WTH. More than one canonical urls were detected for %s'\n % su)\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n if canonical is None:\n print(\n '##### Dupes were detected for %s but no canonical url found!'\n % su)\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\n\n<mask token>\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s['type'] == stype and s['url'] == url:\n return\n post['syndicated'].append({'type': stype, 'url': url})\n newfile = frontmatter.dumps(post)\n with mdfile.open('w', encoding='UTF-8') as w:\n 
w.write(newfile)\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return content\n\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [{'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id'])}]\n kind = 'notes'\n if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:\n kind = 'replies'\n post['reply_to'] = {'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (t[\n 'in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n 'name': t['in_reply_to_screen_name'], 'label': \"%s's tweet\" % t\n ['in_reply_to_screen_name']}\n elif t['full_text'].startswith('RT @'):\n rc = retweetscache.get(id)\n if rc is None:\n pass\n elif 'retweeted_user' in rc:\n kind = 'reposts'\n post['repost_source'] = {'type': 'twitter', 'name': rc[\n 'retweeted_user'], 'url': \n 'https://twitter.com/%s/statuses/%s/' % (rc[\n 'retweeted_user'], rc['retweeted_id'])}\n else:\n pass\n media = []\n for m in 
t.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n if len(media) > 0:\n if kind != 'reposts' and kind != 'replies':\n kind = 'photos'\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n parsed_tags = re.findall('\\\\s#(\\\\w+)', ' ' + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post['tags'] = tags\n post['source'] = 'twitter'\n outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')\n if len(media) > 0:\n outdir = outdir / id\n if not outdir.exists():\n outdir.mkdir(parents=True)\n if len(media) > 0:\n outfile = outdir / 'index.md'\n for imgfile in mediadir.glob(id + '*.*'):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n else:\n outfile = outdir / (id + '.md')\n newfile = frontmatter.dumps(post)\n with outfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n return True\n\n\ndef process_syn_url(d1, raw_url, url):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n url, no_errors = resolver.get_final_url(url)\n if not no_errors:\n print(d1['full_text'])\n url = url.replace('www.instagram.com', 'instagram.com')\n url = url.replace('/roytang0400', '')\n url = urldefrag(url)[0]\n if url.find('instagram.com') >= 0 and url.find('?') >= 0:\n url = url.split('?')[0]\n if url in urlmap:\n u = urlmap[url]\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n if url.find('://roytang.net') >= 0 or url.find('://mtgstorm.com') >= 0:\n link_url = urlparse(url)\n u = urlmap.get(link_url.path, None)\n if u is None:\n title_search_term = d1['full_text']\n title_search_term = title_search_term.replace('New blog post: ', ''\n )\n title_search_term = title_search_term.replace('New post: ', 
'')\n title_search_term = title_search_term.replace(raw_url, '')\n title_search_term = title_search_term.strip()\n u = urlmap.get(title_search_term, None)\n if u is not None:\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n else:\n print('######## Unmatched roytang url: %s' % url)\n print(d1['full_text'])\n return True\n return False\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] != debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 
'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\ndef thread_replies():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n d = reversed(d)\n for d1 in d:\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n is_reply = True\n if not is_reply:\n continue\n id_str = d1['id_str']\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, id_str)\n if orig_tweet_url in urlmap:\n continue\n date = datetime.strptime(d1['created_at'],\n '%a %b %d %H:%M:%S %z %Y')\n if d1['in_reply_to_screen_name'] == TWITTER_USERNAME:\n replied_to_url = 'https://twitter.com/%s/statuses/%s/' % (d1\n ['in_reply_to_screen_name'], d1[\n 'in_reply_to_status_id_str'])\n info = urlmap[replied_to_url]\n source_path = Path(info['source_path'])\n full_path = contentdir / source_path\n if full_path.name == 'index.md':\n parentdir = full_path.parent\n else:\n parentdir = full_path.parent / full_path.stem\n if not parentdir.exists():\n parentdir.mkdir(parents=True)\n oldfile = full_path\n full_path = parentdir / 'index.md'\n shutil.move(str(oldfile), str(full_path))\n urlmap[replied_to_url]['source_path'] = str(full_path.\n relative_to(contentdir))\n with full_path.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n post['syndicated'].append({'type': 'twitter', 'url':\n orig_tweet_url})\n content = get_content(d1)\n post.content = post.content + '\\n\\r' + 
content\n newfile = frontmatter.dumps(post)\n with full_path.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n media = []\n for m in d1.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n for imgfile in mediadir.glob(d1['id_str'] + '*.*'):\n to_file = parentdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n oldfile = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / (id_str + '.md')\n if oldfile.exists():\n os.remove(str(oldfile))\n oldfolder = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / id_str\n if oldfolder.exists():\n shutil.rmtree(str(oldfolder))\n urlmap[orig_tweet_url] = info\n else:\n continue\n idx = idx + 1\n print(idx)\n\n\n<mask token>\n\n\ndef cleanup_videos():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, d1['id_str'])\n info = urlmap.get(orig_tweet_url)\n if info is None:\n continue\n for m in d1.get('extended_entities', {}).get('media', []):\n if 'video_info' in m:\n videos = []\n lowest_bitrate = 1000000000000\n lowest_video = ''\n for vi in m['video_info']['variants']:\n if 'bitrate' in vi:\n videos.append(vi['url'])\n bitrate = int(vi['bitrate'])\n if bitrate < lowest_bitrate:\n lowest_video = vi['url']\n lowest_bitrate = bitrate\n mdfile = urlmap_to_mdfile(info)\n if str(mdfile).find('\\\\photos\\\\') >= 0:\n print(mdfile)\n p = PostBuilder.from_mdfile(mdfile)\n p.kind = 'notes'\n p.save()\n container = mdfile.parent\n for f in container.iterdir():\n os.remove(str(f))\n container.rmdir()\n continue\n for v in videos:\n if v == lowest_video:\n continue\n name = Path(v).name\n if name.find('?') >= 0:\n name = name.split('?')[0]\n vfilename = d1['id_str'] + '-' + name\n vfile = container / vfilename\n print(vfile)\n os.remove(str(vfile))\n\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 
0\n raw = 0\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\n<mask token>\n",
"step-4": "SOURCE_FILE = 'D:\\\\temp\\\\twitter\\\\tweet.js'\nTWITTER_USERNAME = 'roytang'\nauto_tags = ['mtg']\nsyndicated_sources = ['IFTTT', 'Tumblr', 'instagram.com', 'Mailchimp',\n 'Twitter Web', 'TweetDeck', 'mtgstorm']\ndebug_id = None\nimport frontmatter\nimport json\nimport requests\nimport urllib.request\nfrom urllib.parse import urlparse, parse_qs, urldefrag\nfrom urllib.error import HTTPError\nimport sys\nfrom pathlib import Path\nimport os, shutil\nimport inspect\nfrom datetime import datetime\nimport re\nfrom utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder\ncwd = Path.cwd()\ncontentdir = cwd / 'content'\nblogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\nmediadir = Path('D:\\\\temp\\\\roy_mtg-twitter\\\\tweet_media')\nretweetscache = load_map_from_json('d:\\\\temp\\\\twitter\\\\retweets.json')\nresolver = URLResolver()\n\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / 'urlmap.json'\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding='UTF-8') as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if 'syndicated' in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get('title', '').strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d['source_path'].startswith('post') or d['source_path'\n ].startswith('links') or len(d['syndicated']) > 2:\n if canonical is not None:\n print(\n '\\n\\r##### WTH. 
More than one canonical urls were detected for %s'\n % su)\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n if canonical is None:\n print(\n '##### Dupes were detected for %s but no canonical url found!'\n % su)\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\n\nurlmap = loadurlmap(False)\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s['type'] == stype and s['url'] == url:\n return\n post['syndicated'].append({'type': stype, 'url': url})\n newfile = frontmatter.dumps(post)\n with mdfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return 
content\n\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [{'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id'])}]\n kind = 'notes'\n if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:\n kind = 'replies'\n post['reply_to'] = {'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (t[\n 'in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n 'name': t['in_reply_to_screen_name'], 'label': \"%s's tweet\" % t\n ['in_reply_to_screen_name']}\n elif t['full_text'].startswith('RT @'):\n rc = retweetscache.get(id)\n if rc is None:\n pass\n elif 'retweeted_user' in rc:\n kind = 'reposts'\n post['repost_source'] = {'type': 'twitter', 'name': rc[\n 'retweeted_user'], 'url': \n 'https://twitter.com/%s/statuses/%s/' % (rc[\n 'retweeted_user'], rc['retweeted_id'])}\n else:\n pass\n media = []\n for m in t.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n if len(media) > 0:\n if kind != 'reposts' and kind != 'replies':\n kind = 'photos'\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n parsed_tags = re.findall('\\\\s#(\\\\w+)', ' ' + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post['tags'] = tags\n post['source'] = 'twitter'\n outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')\n if len(media) > 0:\n outdir = outdir / id\n if not outdir.exists():\n outdir.mkdir(parents=True)\n if len(media) > 0:\n outfile = outdir / 'index.md'\n for imgfile in mediadir.glob(id + '*.*'):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n else:\n outfile = outdir / (id + '.md')\n newfile = 
frontmatter.dumps(post)\n with outfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n return True\n\n\ndef process_syn_url(d1, raw_url, url):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n url, no_errors = resolver.get_final_url(url)\n if not no_errors:\n print(d1['full_text'])\n url = url.replace('www.instagram.com', 'instagram.com')\n url = url.replace('/roytang0400', '')\n url = urldefrag(url)[0]\n if url.find('instagram.com') >= 0 and url.find('?') >= 0:\n url = url.split('?')[0]\n if url in urlmap:\n u = urlmap[url]\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n if url.find('://roytang.net') >= 0 or url.find('://mtgstorm.com') >= 0:\n link_url = urlparse(url)\n u = urlmap.get(link_url.path, None)\n if u is None:\n title_search_term = d1['full_text']\n title_search_term = title_search_term.replace('New blog post: ', ''\n )\n title_search_term = title_search_term.replace('New post: ', '')\n title_search_term = title_search_term.replace(raw_url, '')\n title_search_term = title_search_term.strip()\n u = urlmap.get(title_search_term, None)\n if u is not None:\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n else:\n print('######## Unmatched roytang url: %s' % url)\n print(d1['full_text'])\n return True\n return False\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if 
process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] != debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\ndef thread_replies():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n d = reversed(d)\n for d1 in d:\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n is_reply = True\n if not is_reply:\n continue\n id_str = d1['id_str']\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, id_str)\n 
if orig_tweet_url in urlmap:\n continue\n date = datetime.strptime(d1['created_at'],\n '%a %b %d %H:%M:%S %z %Y')\n if d1['in_reply_to_screen_name'] == TWITTER_USERNAME:\n replied_to_url = 'https://twitter.com/%s/statuses/%s/' % (d1\n ['in_reply_to_screen_name'], d1[\n 'in_reply_to_status_id_str'])\n info = urlmap[replied_to_url]\n source_path = Path(info['source_path'])\n full_path = contentdir / source_path\n if full_path.name == 'index.md':\n parentdir = full_path.parent\n else:\n parentdir = full_path.parent / full_path.stem\n if not parentdir.exists():\n parentdir.mkdir(parents=True)\n oldfile = full_path\n full_path = parentdir / 'index.md'\n shutil.move(str(oldfile), str(full_path))\n urlmap[replied_to_url]['source_path'] = str(full_path.\n relative_to(contentdir))\n with full_path.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n post['syndicated'].append({'type': 'twitter', 'url':\n orig_tweet_url})\n content = get_content(d1)\n post.content = post.content + '\\n\\r' + content\n newfile = frontmatter.dumps(post)\n with full_path.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n media = []\n for m in d1.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n for imgfile in mediadir.glob(d1['id_str'] + '*.*'):\n to_file = parentdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n oldfile = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / (id_str + '.md')\n if oldfile.exists():\n os.remove(str(oldfile))\n oldfolder = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / id_str\n if oldfolder.exists():\n shutil.rmtree(str(oldfolder))\n urlmap[orig_tweet_url] = info\n else:\n continue\n idx = idx + 1\n print(idx)\n\n\nfrom utils import urlmap_to_mdfile\n\n\ndef cleanup_videos():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n orig_tweet_url = 
'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, d1['id_str'])\n info = urlmap.get(orig_tweet_url)\n if info is None:\n continue\n for m in d1.get('extended_entities', {}).get('media', []):\n if 'video_info' in m:\n videos = []\n lowest_bitrate = 1000000000000\n lowest_video = ''\n for vi in m['video_info']['variants']:\n if 'bitrate' in vi:\n videos.append(vi['url'])\n bitrate = int(vi['bitrate'])\n if bitrate < lowest_bitrate:\n lowest_video = vi['url']\n lowest_bitrate = bitrate\n mdfile = urlmap_to_mdfile(info)\n if str(mdfile).find('\\\\photos\\\\') >= 0:\n print(mdfile)\n p = PostBuilder.from_mdfile(mdfile)\n p.kind = 'notes'\n p.save()\n container = mdfile.parent\n for f in container.iterdir():\n os.remove(str(f))\n container.rmdir()\n continue\n for v in videos:\n if v == lowest_video:\n continue\n name = Path(v).name\n if name.find('?') >= 0:\n name = name.split('?')[0]\n vfilename = d1['id_str'] + '-' + name\n vfile = container / vfilename\n print(vfile)\n os.remove(str(vfile))\n\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\nstats()\n",
"step-5": "SOURCE_FILE = \"D:\\\\temp\\\\twitter\\\\tweet.js\"\nTWITTER_USERNAME = 'roytang'\nauto_tags = [\"mtg\"]\nsyndicated_sources = [\"IFTTT\", \"Tumblr\", \"instagram.com\", \"Mailchimp\", \"Twitter Web\", \"TweetDeck\", \"mtgstorm\"]\ndebug_id = None\n# debug_id = \"11143081155\" \n\nimport frontmatter\nimport json\nimport requests\nimport urllib.request\nfrom urllib.parse import urlparse, parse_qs, urldefrag\nfrom urllib.error import HTTPError\nimport sys\nfrom pathlib import Path\nimport os, shutil\nimport inspect\nfrom datetime import datetime\nimport re\nfrom utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder\n\ncwd = Path.cwd()\ncontentdir = cwd / \"content\"\nblogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\nmediadir = Path(\"D:\\\\temp\\\\roy_mtg-twitter\\\\tweet_media\")\n\nretweetscache = load_map_from_json(\"d:\\\\temp\\\\twitter\\\\retweets.json\")\n\nresolver = URLResolver()\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / \"urlmap.json\"\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding=\"UTF-8\") as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if \"syndicated\" in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n # we expect syndicated urls to be unique, \n # so if it's already in the map,\n # it must be a dupe\n # (This is really just to clean up my own mess!)\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get(\"title\", \"\").strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n # clean up any found dupes by syndicated url\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d[\"source_path\"].startswith(\"post\") or d[\"source_path\"].startswith(\"links\") or len(d['syndicated']) > 2:\n 
if canonical is not None:\n print(\"\\n\\r##### WTH. More than one canonical urls were detected for %s\" % (su))\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n\n if canonical is None:\n print(\"##### Dupes were detected for %s but no canonical url found!\" % (su))\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\nurlmap = loadurlmap(False)\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding=\"UTF-8\") as f:\n try:\n post = frontmatter.load(f)\n except:\n print(\"Error parsing file\")\n return\n\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s[\"type\"] == stype and s[\"url\"] == url:\n # dont add a duplicate!\n return\n\n post['syndicated'].append({\n 'type': stype,\n 'url': url\n })\n newfile = frontmatter.dumps(post)\n with mdfile.open(\"w\", encoding=\"UTF-8\") as w:\n w.write(newfile)\n \ndef get_content(t):\n content = t['full_text']\n if \"entities\" in t:\n # get raw urls in the text\n raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)\n # replace mentions with link\n for m in t[\"entities\"][\"user_mentions\"]:\n screen_name = m[\"screen_name\"]\n # replace with markdown link\n mdlink = \"[@%s](https://twitter.com/%s/)\" % (screen_name, screen_name)\n content = content.replace(\"@\"+screen_name, mdlink)\n processed_urls = []\n # clean urls\n for u in t[\"entities\"][\"urls\"]:\n url = u[\"url\"]\n processed_urls.append(url)\n expanded_url = u[\"expanded_url\"]\n processed_urls.append(expanded_url)\n # print(\"##### A URL!!! 
%s\" % expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n\n # find urls that were not in the entities\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n\n return content\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [\n {\n \"type\": \"twitter\",\n \"url\": \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, t['id'])\n }\n ]\n\n kind = \"notes\"\n if \"in_reply_to_status_id_str\" in t and \"in_reply_to_screen_name\" in t:\n kind = \"replies\"\n post[\"reply_to\"] = {\n \"type\": \"twitter\",\n \"url\": \"https://twitter.com/%s/statuses/%s/\" % (t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n \"name\": t[\"in_reply_to_screen_name\"],\n \"label\": \"%s's tweet\" % (t[\"in_reply_to_screen_name\"]) \n }\n elif t[\"full_text\"].startswith(\"RT @\"):\n rc = retweetscache.get(id)\n if rc is None:\n # RTed status is inaccessible, we'll just render it as an ordinary note\n pass\n else:\n if \"retweeted_user\" in rc:\n kind = \"reposts\"\n post['repost_source'] = {\n \"type\": \"twitter\",\n \"name\": rc[\"retweeted_user\"],\n \"url\": \"https://twitter.com/%s/statuses/%s/\" % (rc['retweeted_user'], rc['retweeted_id'])\n } \n # dont process reposts for now\n # return False\n else:\n # 785744070027030528 fails this\n # RTed status is inaccessible, we'll just render it as an ordinary note\n pass\n\n # else:\n # # dont process others for now\n # return False\n\n media = []\n for m in t.get(\"extended_entities\", {}).get(\"media\", []):\n media.append(m[\"media_url_https\"])\n if len(media) > 0:\n if kind != \"reposts\" and kind != \"replies\":\n 
kind = \"photos\" \n \n # dont process media for now\n # return False\n\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n\n parsed_tags = re.findall(r\"\\s#(\\w+)\", \" \" + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post[\"tags\"] = tags\n\n post[\"source\"] = \"twitter\"\n outdir = contentdir / kind / d.strftime(\"%Y\") / d.strftime(\"%m\")\n if len(media) > 0:\n outdir = outdir / (id)\n\n if not outdir.exists():\n outdir.mkdir(parents=True)\n\n if len(media) > 0:\n outfile = outdir / ( \"index.md\" )\n # find photos\n for imgfile in mediadir.glob(id + \"*.*\"):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file)) \n else:\n outfile = outdir / ( id + \".md\" )\n\n newfile = frontmatter.dumps(post)\n with outfile.open(\"w\", encoding=\"UTF-8\") as w:\n w.write(newfile)\n return True\n\ndef process_syn_url(d1, raw_url, url):\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, d1['id_str'])\n\n url, no_errors = resolver.get_final_url(url)\n if not no_errors:\n print(d1[\"full_text\"])\n\n url = url.replace(\"www.instagram.com\", \"instagram.com\")\n url = url.replace(\"/roytang0400\", \"\")\n url = urldefrag(url)[0]\n if url.find(\"instagram.com\") >= 0 and url.find(\"?\") >= 0:\n # remove utm and other misc query params from insta urls\n url = url.split(\"?\")[0]\n if url in urlmap:\n u = urlmap[url]\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, \"twitter\")\n return True\n\n if url.find(\"://roytang.net\") >= 0 or url.find(\"://mtgstorm.com\") >= 0:\n link_url = urlparse(url)\n u = urlmap.get(link_url.path, None)\n if u is None:\n # try matching by title\n title_search_term = d1[\"full_text\"]\n title_search_term = title_search_term.replace(\"New blog post: 
\", \"\")\n title_search_term = title_search_term.replace(\"New post: \", \"\")\n title_search_term = title_search_term.replace(raw_url, \"\")\n title_search_term = title_search_term.strip()\n u = urlmap.get(title_search_term, None)\n if u is not None:\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, \"twitter\")\n return True\n else:\n print(\"######## Unmatched roytang url: %s\" % (url))\n print(d1[\"full_text\"])\n return True\n\n return False\n\ndef process_tweet(d1):\n\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, d1['id_str'])\n\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'].startswith('photos\\\\'):\n # no need to process further any tweets that are already mapped to a post\n return True\n\n tweet_source = d1[\"source\"]\n # print(\"#### %s: %s\" % (tweet_source, orig_tweet_url))\n # detect content syndicated from elsewhere\n # instagram, tumblr, roytang.net\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get(\"urls\", []):\n raw_url = u[\"url\"]\n url = u[\"expanded_url\"]\n if process_syn_url(d1, raw_url, url):\n return True\n # print(\"######## URL = %s\" % (url))\n\n # also process raw urls\n raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1[\"full_text\"])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n\n return create_post(d1)\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1[\"id_str\"] != debug_id:\n continue\n\n if process_tweet(d1):\n continue\n\n tweet_source = d1[\"source\"]\n if tweet_source not in countbysource:\n 
countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n\n is_reply = False\n if \"in_reply_to_status_id_str\" in d1 and \"in_reply_to_screen_name\" in d1:\n replies = replies + 1\n is_reply = True\n\n # handle retweet\n is_retweet = False\n content = d1[\"full_text\"]\n if content.startswith(\"RT @\"):\n retweets = retweets + 1\n is_retweet = True\n\n media = []\n if \"extended_entities\" in d1:\n for m in d1[\"extended_entities\"][\"media\"]:\n media.append(m[\"media_url_https\"])\n\n if len(media) > 0:\n withmedia = withmedia + 1\n\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n\n idx = idx + 1\n # if idx > 100:\n # break\n\n # save the url cache for future use\n resolver.save_cache()\n\n for source in countbysource:\n print(\"countbysource: %s = %s\" % (source, countbysource[source]))\n print(\"replies: %s\" % (replies))\n print(\"retweets: %s\" % (retweets))\n print(\"withmedia: %s\" % (withmedia))\n print(\"raw: %s\" % (raw))\n print(\"total: %s\" % (idx))\n\ndef thread_replies():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n # process in reverse order so tweet sequences are in order\n d = reversed(d)\n for d1 in d:\n is_reply = False\n if \"in_reply_to_status_id_str\" in d1 and \"in_reply_to_screen_name\" in d1:\n is_reply = True\n if not is_reply:\n continue\n id_str = d1['id_str']\n # if id_str != \"602009895437737984\" and id_str != \"602009747294924802\":\n # continue\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, id_str)\n # dont bother if already syndicated\n if orig_tweet_url in urlmap:\n continue\n date = datetime.strptime(d1['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n # process replies to myself\n if d1[\"in_reply_to_screen_name\"] == TWITTER_USERNAME:\n replied_to_url = \"https://twitter.com/%s/statuses/%s/\" % (d1['in_reply_to_screen_name'], d1['in_reply_to_status_id_str'])\n info = 
urlmap[replied_to_url]\n source_path = Path(info['source_path'])\n full_path = contentdir / source_path\n # welp, we might as well move them to bundles\n if full_path.name == \"index.md\":\n parentdir = full_path.parent\n else:\n parentdir = full_path.parent / full_path.stem\n if not parentdir.exists():\n parentdir.mkdir(parents=True)\n oldfile = full_path\n full_path = parentdir / \"index.md\"\n shutil.move(str(oldfile), str(full_path))\n # also update the urlmap!\n urlmap[replied_to_url]['source_path'] = str(full_path.relative_to(contentdir))\n # append the reply to the original post, and add it as a syndication as well\n with full_path.open(encoding=\"UTF-8\") as f:\n try:\n post = frontmatter.load(f)\n except:\n print(\"Error parsing file\")\n return\n post['syndicated'].append({\n 'type': 'twitter',\n 'url': orig_tweet_url\n })\n content = get_content(d1)\n post.content = post.content + \"\\n\\r\" + content\n newfile = frontmatter.dumps(post)\n with full_path.open(\"w\", encoding=\"UTF-8\") as w:\n w.write(newfile)\n # copy over any media from the reply as well\n media = []\n for m in d1.get(\"extended_entities\", {}).get(\"media\", []):\n media.append(m[\"media_url_https\"])\n for imgfile in mediadir.glob(d1[\"id_str\"] + \"*.*\"):\n to_file = parentdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file)) \n # delete any existing file created for this reply\n oldfile = contentdir / \"replies\" / date.strftime(\"%Y\") / date.strftime(\"%m\") / (id_str + \".md\")\n if oldfile.exists():\n os.remove(str(oldfile))\n oldfolder = contentdir / \"replies\" / date.strftime(\"%Y\") / date.strftime(\"%m\") / (id_str)\n if oldfolder.exists():\n shutil.rmtree(str(oldfolder))\n # replace this entry in the urlmap! 
this is so that succeeding replies can find the correct root tweet to attach to\n urlmap[orig_tweet_url] = info\n else:\n continue\n\n idx = idx + 1\n print(idx)\n\nfrom utils import urlmap_to_mdfile\n\ndef cleanup_videos():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, d1[\"id_str\"])\n info = urlmap.get(orig_tweet_url)\n if info is None:\n continue\n for m in d1.get(\"extended_entities\", {}).get(\"media\", []):\n if \"video_info\" in m:\n videos = []\n lowest_bitrate = 1000000000000\n lowest_video = \"\"\n for vi in m[\"video_info\"][\"variants\"]:\n if 'bitrate' in vi:\n videos.append(vi[\"url\"])\n bitrate = int(vi['bitrate'])\n if bitrate < lowest_bitrate:\n lowest_video = vi[\"url\"]\n lowest_bitrate = bitrate\n \n mdfile = urlmap_to_mdfile(info)\n if str(mdfile).find(\"\\\\photos\\\\\") >= 0:\n print(mdfile)\n # move it to notes, since it's not a photo\n p = PostBuilder.from_mdfile(mdfile)\n p.kind = \"notes\"\n p.save() \n # delete the old files\n container = mdfile.parent\n for f in container.iterdir():\n os.remove(str(f))\n container.rmdir()\n continue\n # delete all the video files except for the one with the lowest bitrate\n for v in videos:\n if v == lowest_video:\n continue\n name = Path(v).name\n if name.find(\"?\") >= 0:\n name = name.split(\"?\")[0]\n vfilename = d1[\"id_str\"] + \"-\" + name\n vfile = container / vfilename\n print(vfile)\n os.remove(str(vfile))\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\n# thread_replies()\n# import_all()\n# cleanup_videos()\nstats()\n",
"step-ids": [
3,
8,
11,
14,
15
]
}
|
[
3,
8,
11,
14,
15
] |
# filename: cycle_break.py
# for i in range(1, 101):
# if i % 3 == 0 and i % 8 == 0:
# print(i)
# break
num = 1
while num <= 100:
if num % 4 == 0 and num % 6 == 0:
print(num)
break
num += 1
|
normal
|
{
"blob_id": "d04506e67071abf36d43a828d90fbe0f14230103",
"index": 3208,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile num <= 100:\n if num % 4 == 0 and num % 6 == 0:\n print(num)\n break\n num += 1\n",
"step-3": "num = 1\nwhile num <= 100:\n if num % 4 == 0 and num % 6 == 0:\n print(num)\n break\n num += 1\n",
"step-4": "# filename: cycle_break.py\n\n# for i in range(1, 101):\n# if i % 3 == 0 and i % 8 == 0:\n# print(i)\n# break\n\nnum = 1\nwhile num <= 100:\n if num % 4 == 0 and num % 6 == 0:\n print(num)\n break\n num += 1",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def max_depth_bottom_up(root):
if not root:
return 0
max_so_far = 0
def max_depth(node, depth):
nonlocal max_so_far
if not node.left and not node.right:
max_so_far = max(max_so_far, depth)
else:
if node.left:
max_depth(node.left, 1 + depth)
if node.right:
max_depth(node.right, 1 + depth)
max_depth(root, 1)
return max_so_far
def max_depth_top_down(root):
if not root:
return 0
return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.
right))
<|reserved_special_token_0|>
def has_path_sum(root, target_sum):
def path_sum(node, sum_left):
if not node:
return False
if not node.left and not node.right and node.val == sum_left:
return True
return path_sum(node.left, sum_left - node.val) or path_sum(node.
right, sum_left - node.val)
return path_sum(root, target_sum)
def build_tree_from_inorder_preorder(inorder, preorder):
if not inorder or not preorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(preorder.pop(0))
mid = inorder_map[node.val]
node.left = helper(lo, mid - 1)
node.right = helper(mid + 1, hi)
return node
return helper(0, len(inorder) - 1)
<|reserved_special_token_0|>
def next_right_pointer(root):
"""
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
self.next = None
"""
levels = []
to_do = [root]
while to_do:
cur_level = []
next_to_do = []
for n in to_do:
if n is not None:
cur_level.append(n)
next_to_do += [n.left, n.right]
if cur_level:
levels.append(cur_level)
to_do = next_to_do
for level in levels[1:]:
level.append(None)
for i in range(1, len(level)):
level[i - 1].next = level[i]
return root
<|reserved_special_token_0|>
def serialize_tree(root):
levels = level_order_traversal(root)
return levels
def deserialize_tree(serialized):
if not serialized:
return None
levels = copy.deepcopy(serialized)
root = TreeNode(levels.pop(0)[0])
nodes = [root]
while levels:
level = levels.pop(0)
next_nodes = []
for i, node in enumerate(nodes):
if node:
node.left = TreeNode(level[2 * i]) if level[2 * i] else None
node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1
] else None
next_nodes += [node.left, node.right]
else:
next_nodes += [None, None]
nodes = next_nodes
return root
def equal(root1, root2):
if not root1 and not root2:
return True
if not root1 or not root2:
return False
return root1.val == root2.val and equal(root1.left, root2.left) and equal(
root1.right, root2.right)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def max_depth_bottom_up(root):
if not root:
return 0
max_so_far = 0
def max_depth(node, depth):
nonlocal max_so_far
if not node.left and not node.right:
max_so_far = max(max_so_far, depth)
else:
if node.left:
max_depth(node.left, 1 + depth)
if node.right:
max_depth(node.right, 1 + depth)
max_depth(root, 1)
return max_so_far
def max_depth_top_down(root):
if not root:
return 0
return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.
right))
<|reserved_special_token_0|>
def has_path_sum(root, target_sum):
def path_sum(node, sum_left):
if not node:
return False
if not node.left and not node.right and node.val == sum_left:
return True
return path_sum(node.left, sum_left - node.val) or path_sum(node.
right, sum_left - node.val)
return path_sum(root, target_sum)
def build_tree_from_inorder_preorder(inorder, preorder):
if not inorder or not preorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(preorder.pop(0))
mid = inorder_map[node.val]
node.left = helper(lo, mid - 1)
node.right = helper(mid + 1, hi)
return node
return helper(0, len(inorder) - 1)
<|reserved_special_token_0|>
def next_right_pointer(root):
"""
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
self.next = None
"""
levels = []
to_do = [root]
while to_do:
cur_level = []
next_to_do = []
for n in to_do:
if n is not None:
cur_level.append(n)
next_to_do += [n.left, n.right]
if cur_level:
levels.append(cur_level)
to_do = next_to_do
for level in levels[1:]:
level.append(None)
for i in range(1, len(level)):
level[i - 1].next = level[i]
return root
<|reserved_special_token_0|>
def lowest_common_ancestor_2(root, p, q):
if root == p or root == q:
return root
left = right = None
if root.left:
left = lowest_common_ancestor_2(root.left, p, q)
if root.right:
right = lowest_common_ancestor_2(root.right, p, q)
if left and right:
return root
else:
return left or right
<|reserved_special_token_0|>
def serialize_tree(root):
levels = level_order_traversal(root)
return levels
def deserialize_tree(serialized):
if not serialized:
return None
levels = copy.deepcopy(serialized)
root = TreeNode(levels.pop(0)[0])
nodes = [root]
while levels:
level = levels.pop(0)
next_nodes = []
for i, node in enumerate(nodes):
if node:
node.left = TreeNode(level[2 * i]) if level[2 * i] else None
node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1
] else None
next_nodes += [node.left, node.right]
else:
next_nodes += [None, None]
nodes = next_nodes
return root
def equal(root1, root2):
if not root1 and not root2:
return True
if not root1 or not root2:
return False
return root1.val == root2.val and equal(root1.left, root2.left) and equal(
root1.right, root2.right)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def max_depth_bottom_up(root):
if not root:
return 0
max_so_far = 0
def max_depth(node, depth):
nonlocal max_so_far
if not node.left and not node.right:
max_so_far = max(max_so_far, depth)
else:
if node.left:
max_depth(node.left, 1 + depth)
if node.right:
max_depth(node.right, 1 + depth)
max_depth(root, 1)
return max_so_far
def max_depth_top_down(root):
if not root:
return 0
return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.
right))
<|reserved_special_token_0|>
def has_path_sum(root, target_sum):
def path_sum(node, sum_left):
if not node:
return False
if not node.left and not node.right and node.val == sum_left:
return True
return path_sum(node.left, sum_left - node.val) or path_sum(node.
right, sum_left - node.val)
return path_sum(root, target_sum)
def build_tree_from_inorder_preorder(inorder, preorder):
if not inorder or not preorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(preorder.pop(0))
mid = inorder_map[node.val]
node.left = helper(lo, mid - 1)
node.right = helper(mid + 1, hi)
return node
return helper(0, len(inorder) - 1)
def build_tree_from_inorder_postorder(inorder, postorder):
if not inorder or not postorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(postorder.pop())
mid = inorder_map[node.val]
node.right = helper(mid + 1, hi)
node.left = helper(lo, mid - 1)
return node
return helper(0, len(inorder) - 1)
def next_right_pointer(root):
"""
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
self.next = None
"""
levels = []
to_do = [root]
while to_do:
cur_level = []
next_to_do = []
for n in to_do:
if n is not None:
cur_level.append(n)
next_to_do += [n.left, n.right]
if cur_level:
levels.append(cur_level)
to_do = next_to_do
for level in levels[1:]:
level.append(None)
for i in range(1, len(level)):
level[i - 1].next = level[i]
return root
def lowest_common_ancestor(root, p, q):
answer = None
def recurse_tree(node):
nonlocal answer
if not node:
return False
left = recurse_tree(node.left)
right = recurse_tree(node.right)
mid = node == p or node == q
if mid + left + right >= 2:
answer = node
return mid or left or right
recurse_tree(root)
return answer
def lowest_common_ancestor_2(root, p, q):
if root == p or root == q:
return root
left = right = None
if root.left:
left = lowest_common_ancestor_2(root.left, p, q)
if root.right:
right = lowest_common_ancestor_2(root.right, p, q)
if left and right:
return root
else:
return left or right
def lowest_common_ancestor_3(root, p, q):
stack = [root]
parents = {root: None}
while p not in parents or q not in parents:
node = stack.pop()
if node.left:
parents[node.left] = node
stack.append(node.left)
if node.right:
parents[node.right] = node
stack.append(node.right)
ancestors = set()
while p:
ancestors.add(p)
p = parents[p]
while q not in ancestors:
q = parents[q]
return q
def serialize_tree(root):
levels = level_order_traversal(root)
return levels
def deserialize_tree(serialized):
if not serialized:
return None
levels = copy.deepcopy(serialized)
root = TreeNode(levels.pop(0)[0])
nodes = [root]
while levels:
level = levels.pop(0)
next_nodes = []
for i, node in enumerate(nodes):
if node:
node.left = TreeNode(level[2 * i]) if level[2 * i] else None
node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1
] else None
next_nodes += [node.left, node.right]
else:
next_nodes += [None, None]
nodes = next_nodes
return root
def equal(root1, root2):
if not root1 and not root2:
return True
if not root1 or not root2:
return False
return root1.val == root2.val and equal(root1.left, root2.left) and equal(
root1.right, root2.right)
<|reserved_special_token_1|>
import copy
from basics.binary_tree.binary_tree import TreeNode
from basics.binary_tree.traversals import level_order_traversal
def max_depth_bottom_up(root):
if not root:
return 0
max_so_far = 0
def max_depth(node, depth):
nonlocal max_so_far
if not node.left and not node.right:
max_so_far = max(max_so_far, depth)
else:
if node.left:
max_depth(node.left, 1 + depth)
if node.right:
max_depth(node.right, 1 + depth)
max_depth(root, 1)
return max_so_far
def max_depth_top_down(root):
if not root:
return 0
return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.
right))
def is_symmetric(root):
def is_mirror(left, right):
if left is None and right is None:
return True
elif left is None or right is None:
return False
else:
return left.val == right.val and is_mirror(left.right, right.left
) and is_mirror(left.left, right.right)
return is_mirror(root, root)
def has_path_sum(root, target_sum):
def path_sum(node, sum_left):
if not node:
return False
if not node.left and not node.right and node.val == sum_left:
return True
return path_sum(node.left, sum_left - node.val) or path_sum(node.
right, sum_left - node.val)
return path_sum(root, target_sum)
def build_tree_from_inorder_preorder(inorder, preorder):
if not inorder or not preorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(preorder.pop(0))
mid = inorder_map[node.val]
node.left = helper(lo, mid - 1)
node.right = helper(mid + 1, hi)
return node
return helper(0, len(inorder) - 1)
def build_tree_from_inorder_postorder(inorder, postorder):
if not inorder or not postorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(postorder.pop())
mid = inorder_map[node.val]
node.right = helper(mid + 1, hi)
node.left = helper(lo, mid - 1)
return node
return helper(0, len(inorder) - 1)
def next_right_pointer(root):
    """Link every node's ``next`` attribute to its right-hand neighbour on
    the same level; the rightmost node of each level gets None.  The root
    level is left untouched (TreeNode initialises ``next`` to None):

    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right
            self.next = None
    """
    level = [root] if root is not None else []
    is_root_level = True
    while level:
        if not is_root_level:
            previous = None
            for node in level:
                if previous is not None:
                    previous.next = node
                previous = node
            previous.next = None
        is_root_level = False
        children = []
        for node in level:
            if node.left is not None:
                children.append(node.left)
            if node.right is not None:
                children.append(node.right)
        level = children
    return root


def lowest_common_ancestor(root, p, q):
    """Return the lowest common ancestor of *p* and *q* (both assumed
    present) using a single post-order walk."""
    found = [None]

    def walk(node):
        if not node:
            return False
        in_left = walk(node.left)
        in_right = walk(node.right)
        here = node == p or node == q
        if (here, in_left, in_right).count(True) >= 2:
            found[0] = node
        return here or in_left or in_right

    walk(root)
    return found[0]


def lowest_common_ancestor_2(root, p, q):
    """Recursive LCA: if *root* is a target it is the answer; otherwise the
    answer is the subtree that found both targets, or whichever single
    subtree found anything."""
    if root == p or root == q:
        return root
    found_left = lowest_common_ancestor_2(root.left, p, q) if root.left else None
    found_right = lowest_common_ancestor_2(root.right, p, q) if root.right else None
    if found_left and found_right:
        return root
    return found_left or found_right


def lowest_common_ancestor_3(root, p, q):
    """Iterative LCA: record parent pointers until both targets are seen,
    then climb from *p* collecting ancestors and walk up from *q* until an
    ancestor of *p* is reached."""
    parents = {root: None}
    pending = [root]
    while p not in parents or q not in parents:
        node = pending.pop()
        for child in (node.left, node.right):
            if child:
                parents[child] = node
                pending.append(child)
    seen = set()
    walker = p
    while walker:
        seen.add(walker)
        walker = parents[walker]
    walker = q
    while walker not in seen:
        walker = parents[walker]
    return walker
def serialize_tree(root):
    """Serialize *root* as its list-of-levels traversal (see
    level_order_traversal); deserialize_tree is the inverse."""
    return level_order_traversal(root)


def deserialize_tree(serialized):
    """Rebuild a tree from the list-of-levels produced by serialize_tree.

    The input is deep-copied first, so the caller's structure is untouched.
    Absent children are represented by None slots inside each level.
    """
    if not serialized:
        return None
    levels = copy.deepcopy(serialized)
    root = TreeNode(levels.pop(0)[0])
    nodes = [root]
    while levels:
        level = levels.pop(0)
        next_nodes = []
        for i, node in enumerate(nodes):
            if node:
                # Compare against None explicitly: the original truthiness
                # test (`if level[2 * i]`) silently dropped 0-valued nodes.
                left_val = level[2 * i]
                right_val = level[2 * i + 1]
                node.left = TreeNode(left_val) if left_val is not None else None
                node.right = TreeNode(right_val) if right_val is not None else None
                next_nodes += [node.left, node.right]
            else:
                next_nodes += [None, None]
        nodes = next_nodes
    return root


def equal(root1, root2):
    """Return True when the two trees have identical shape and values."""
    if not root1 and not root2:
        return True
    if not root1 or not root2:
        return False
    return (root1.val == root2.val and
            equal(root1.left, root2.left) and
            equal(root1.right, root2.right))
<|reserved_special_token_1|>
import copy
from basics.binary_tree.binary_tree import TreeNode
from basics.binary_tree.traversals import level_order_traversal
def max_depth_bottom_up(root):
    """Return the number of nodes on the longest root-to-leaf path.

    Iterative variant: walks the tree with an explicit stack of
    (node, depth) pairs and records the deepest leaf seen.
    """
    if not root:
        return 0
    deepest = 0
    stack = [(root, 1)]
    while stack:
        node, depth = stack.pop()
        if not node.left and not node.right:
            deepest = max(deepest, depth)
        if node.left:
            stack.append((node.left, depth + 1))
        if node.right:
            stack.append((node.right, depth + 1))
    return deepest


def max_depth_top_down(root):
    """Return the height (in nodes) of the tree rooted at *root*."""
    if not root:
        return 0
    left_depth = max_depth_top_down(root.left)
    right_depth = max_depth_top_down(root.right)
    return 1 + max(left_depth, right_depth)


def is_symmetric(root):
    """Return True if the tree is a mirror image of itself around its
    centre.  Iterative pairwise check of mirrored positions."""
    pairs = [(root, root)]
    while pairs:
        a, b = pairs.pop()
        if a is None and b is None:
            continue
        if a is None or b is None:
            return False
        if a.val != b.val:
            return False
        pairs.append((a.left, b.right))
        pairs.append((a.right, b.left))
    return True
def has_path_sum(root, target_sum):
    """Return True if some root-to-leaf path sums exactly to *target_sum*.

    An empty tree has no root-to-leaf path, so it always yields False.
    """

    def path_sum(node, sum_left):
        # sum_left is the amount still required at *node*.
        if not node:
            return False
        if not node.left and not node.right and node.val == sum_left:
            return True
        remaining = sum_left - node.val
        return path_sum(node.left, remaining) or path_sum(node.right, remaining)

    return path_sum(root, target_sum)


def build_tree_from_inorder_preorder(inorder, preorder):
    """Rebuild a binary tree (unique values) from its inorder and preorder
    traversals; return the root TreeNode.

    Uses an index cursor instead of ``preorder.pop(0)``: the pop-based form
    was O(n^2) (each pop shifts the list) and destroyed the caller's list.
    """
    if not inorder or not preorder:
        return None
    inorder_map = {val: i for i, val in enumerate(inorder)}
    next_root = 0  # index of the next subtree root within preorder

    def helper(lo, hi):
        nonlocal next_root
        if lo > hi:
            return None
        node = TreeNode(preorder[next_root])
        next_root += 1
        mid = inorder_map[node.val]
        # Preorder yields the root, then the whole left subtree, then the right.
        node.left = helper(lo, mid - 1)
        node.right = helper(mid + 1, hi)
        return node

    return helper(0, len(inorder) - 1)


def build_tree_from_inorder_postorder(inorder, postorder):
    """Rebuild a binary tree (unique values) from its inorder and postorder
    traversals; return the root TreeNode.

    Walks postorder from the back with an index cursor instead of
    ``postorder.pop()``, leaving the caller's list intact.
    """
    if not inorder or not postorder:
        return None
    inorder_map = {val: i for i, val in enumerate(inorder)}
    next_root = len(postorder) - 1  # next subtree root, taken from the end

    def helper(lo, hi):
        nonlocal next_root
        if lo > hi:
            return None
        node = TreeNode(postorder[next_root])
        next_root -= 1
        mid = inorder_map[node.val]
        # Reversed postorder yields the root, then right subtree, then left.
        node.right = helper(mid + 1, hi)
        node.left = helper(lo, mid - 1)
        return node

    return helper(0, len(inorder) - 1)
def next_right_pointer(root):
    """Link every node's ``next`` attribute to its right-hand neighbour on
    the same level; the rightmost node of each level gets None.  The root
    level is left untouched (TreeNode initialises ``next`` to None):

    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right
            self.next = None
    """
    level = [root] if root is not None else []
    is_root_level = True
    while level:
        if not is_root_level:
            previous = None
            for node in level:
                if previous is not None:
                    previous.next = node
                previous = node
            previous.next = None
        is_root_level = False
        children = []
        for node in level:
            if node.left is not None:
                children.append(node.left)
            if node.right is not None:
                children.append(node.right)
        level = children
    return root


def lowest_common_ancestor(root, p, q):
    """Return the lowest common ancestor of *p* and *q* (both assumed
    present) using a single post-order walk."""
    found = [None]

    def walk(node):
        if not node:
            return False
        in_left = walk(node.left)
        in_right = walk(node.right)
        here = node == p or node == q
        if (here, in_left, in_right).count(True) >= 2:
            found[0] = node
        return here or in_left or in_right

    walk(root)
    return found[0]


def lowest_common_ancestor_2(root, p, q):
    """Recursive LCA: if *root* is a target it is the answer; otherwise the
    answer is the subtree that found both targets, or whichever single
    subtree found anything."""
    if root == p or root == q:
        return root
    found_left = lowest_common_ancestor_2(root.left, p, q) if root.left else None
    found_right = lowest_common_ancestor_2(root.right, p, q) if root.right else None
    if found_left and found_right:
        return root
    return found_left or found_right


def lowest_common_ancestor_3(root, p, q):
    """Iterative LCA: record parent pointers until both targets are seen,
    then climb from *p* collecting ancestors and walk up from *q* until an
    ancestor of *p* is reached."""
    parents = {root: None}
    pending = [root]
    while p not in parents or q not in parents:
        node = pending.pop()
        for child in (node.left, node.right):
            if child:
                parents[child] = node
                pending.append(child)
    seen = set()
    walker = p
    while walker:
        seen.add(walker)
        walker = parents[walker]
    walker = q
    while walker not in seen:
        walker = parents[walker]
    return walker
def serialize_tree(root):
    """Serialize *root* as its list-of-levels traversal (see
    level_order_traversal); deserialize_tree is the inverse."""
    return level_order_traversal(root)


def deserialize_tree(serialized):
    """Rebuild a tree from the list-of-levels produced by serialize_tree.

    The input is deep-copied first, so the caller's structure is untouched.
    Absent children are represented by None slots inside each level.
    """
    if not serialized:
        return None
    levels = copy.deepcopy(serialized)
    root = TreeNode(levels.pop(0)[0])
    nodes = [root]
    while levels:
        level = levels.pop(0)
        next_nodes = []
        for i, node in enumerate(nodes):
            if node:
                # Compare against None explicitly: the original truthiness
                # test (`if level[2 * i]`) silently dropped 0-valued nodes.
                left_val = level[2 * i]
                right_val = level[2 * i + 1]
                node.left = TreeNode(left_val) if left_val is not None else None
                node.right = TreeNode(right_val) if right_val is not None else None
                next_nodes += [node.left, node.right]
            else:
                next_nodes += [None, None]
        nodes = next_nodes
    return root


def equal(root1, root2):
    """Return True when the two trees have identical shape and values."""
    if not root1 and not root2:
        return True
    if not root1 or not root2:
        return False
    return (root1.val == root2.val and
            equal(root1.left, root2.left) and
            equal(root1.right, root2.right))
|
flexible
|
{
"blob_id": "555646a5d57152034b467cbce16b6c183bcfbb37",
"index": 6658,
"step-1": "<mask token>\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\n<mask token>\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\n<mask token>\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\n<mask token>\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if 
not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-2": "<mask token>\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\n<mask token>\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\n<mask token>\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\n<mask token>\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n left = right = None\n if root.left:\n 
left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n if left and right:\n return root\n else:\n return left or right\n\n\n<mask token>\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-3": "<mask token>\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\n<mask token>\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef build_tree_from_inorder_postorder(inorder, postorder):\n if not inorder or not postorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(postorder.pop())\n mid = inorder_map[node.val]\n node.right = helper(mid + 1, hi)\n node.left = helper(lo, mid - 1)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n 
cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\ndef lowest_common_ancestor(root, p, q):\n answer = None\n\n def recurse_tree(node):\n nonlocal answer\n if not node:\n return False\n left = recurse_tree(node.left)\n right = recurse_tree(node.right)\n mid = node == p or node == q\n if mid + left + right >= 2:\n answer = node\n return mid or left or right\n recurse_tree(root)\n return answer\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n left = right = None\n if root.left:\n left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n if left and right:\n return root\n else:\n return left or right\n\n\ndef lowest_common_ancestor_3(root, p, q):\n stack = [root]\n parents = {root: None}\n while p not in parents or q not in parents:\n node = stack.pop()\n if node.left:\n parents[node.left] = node\n stack.append(node.left)\n if node.right:\n parents[node.right] = node\n stack.append(node.right)\n ancestors = set()\n while p:\n ancestors.add(p)\n p = parents[p]\n while q not in ancestors:\n q = parents[q]\n return q\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n 
return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-4": "import copy\nfrom basics.binary_tree.binary_tree import TreeNode\nfrom basics.binary_tree.traversals import level_order_traversal\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\ndef is_symmetric(root):\n\n def is_mirror(left, right):\n if left is None and right is None:\n return True\n elif left is None or right is None:\n return False\n else:\n return left.val == right.val and is_mirror(left.right, right.left\n ) and is_mirror(left.left, right.right)\n return is_mirror(root, root)\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef build_tree_from_inorder_postorder(inorder, postorder):\n if not inorder or not postorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(postorder.pop())\n mid = inorder_map[node.val]\n node.right = helper(mid + 
1, hi)\n node.left = helper(lo, mid - 1)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\ndef lowest_common_ancestor(root, p, q):\n answer = None\n\n def recurse_tree(node):\n nonlocal answer\n if not node:\n return False\n left = recurse_tree(node.left)\n right = recurse_tree(node.right)\n mid = node == p or node == q\n if mid + left + right >= 2:\n answer = node\n return mid or left or right\n recurse_tree(root)\n return answer\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n left = right = None\n if root.left:\n left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n if left and right:\n return root\n else:\n return left or right\n\n\ndef lowest_common_ancestor_3(root, p, q):\n stack = [root]\n parents = {root: None}\n while p not in parents or q not in parents:\n node = stack.pop()\n if node.left:\n parents[node.left] = node\n stack.append(node.left)\n if node.right:\n parents[node.right] = node\n stack.append(node.right)\n ancestors = set()\n while p:\n ancestors.add(p)\n p = parents[p]\n while q not in ancestors:\n q = parents[q]\n return q\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while 
levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-5": "import copy\n\nfrom basics.binary_tree.binary_tree import TreeNode\nfrom basics.binary_tree.traversals import level_order_traversal\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n\n max_depth(root, 1)\n\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left),\n max_depth_top_down(root.right))\n\n\ndef is_symmetric(root):\n\n def is_mirror(left, right):\n if left is None and right is None:\n return True\n elif left is None or right is None:\n return False\n else:\n return (left.val == right.val and\n is_mirror(left.right, right.left) and\n is_mirror(left.left, right.right))\n\n return is_mirror(root, root)\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return (path_sum(node.left, sum_left-node.val) or\n path_sum(node.right, sum_left - node.val))\n\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n\n return helper(0, len(inorder) - 1)\n\n\ndef build_tree_from_inorder_postorder(inorder, postorder):\n if not inorder or not postorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(postorder.pop())\n mid = inorder_map[node.val]\n node.right = 
helper(mid+1, hi)\n node.left = helper(lo, mid-1)\n return node\n\n return helper(0, len(inorder)-1)\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i-1].next = level[i]\n\n return root\n\n\ndef lowest_common_ancestor(root, p, q):\n answer = None\n\n def recurse_tree(node):\n nonlocal answer\n if not node:\n return False\n\n left = recurse_tree(node.left)\n right = recurse_tree(node.right)\n\n mid = node == p or node == q\n if mid + left + right >= 2:\n answer = node\n\n return mid or left or right\n\n recurse_tree(root)\n\n return answer\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n\n left = right = None\n\n if root.left:\n left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n\n if left and right:\n return root\n else:\n return left or right\n\n\ndef lowest_common_ancestor_3(root, p, q):\n stack = [root]\n parents = {root: None}\n\n while p not in parents or q not in parents:\n node = stack.pop()\n if node.left:\n parents[node.left] = node\n stack.append(node.left)\n if node.right:\n parents[node.right] = node\n stack.append(node.right)\n\n ancestors = set()\n while p:\n ancestors.add(p)\n p = parents[p]\n\n while q not in ancestors:\n q = parents[q]\n\n return q\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = 
TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2*i]) if level[2*i] else None\n node.right = TreeNode(level[2*i+1]) if level[2*i+1] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return (root1.val == root2.val and\n equal(root1.left, root2.left) and\n equal(root1.right, root2.right))\n\n\n\n\n\n\n",
"step-ids": [
8,
9,
12,
14,
15
]
}
|
[
8,
9,
12,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Distribution metadata for the CoreMLModules package.
setup(
    name='CoreMLModules',
    version='0.1.0',
    url='https://github.com/AfricasVoices/CoreMLModules',
    packages=['core_ml_modules'],
    setup_requires=['pytest-runner'],
    install_requires=['numpy', 'scikit-learn', 'nltk'],
    tests_require=['pytest<=3.6.4'],
)
<|reserved_special_token_1|>
from setuptools import setup

# Distribution metadata for the CoreMLModules package.
setup(
    name='CoreMLModules',
    version='0.1.0',
    url='https://github.com/AfricasVoices/CoreMLModules',
    packages=['core_ml_modules'],
    setup_requires=['pytest-runner'],
    install_requires=['numpy', 'scikit-learn', 'nltk'],
    tests_require=['pytest<=3.6.4'],
)
<|reserved_special_token_1|>
from setuptools import setup

# Distribution metadata for the CoreMLModules package.
setup(name='CoreMLModules',
      version='0.1.0',
      url='https://github.com/AfricasVoices/CoreMLModules',
      packages=['core_ml_modules'],
      setup_requires=['pytest-runner'],
      install_requires=['numpy', 'scikit-learn', 'nltk'],
      tests_require=['pytest<=3.6.4'])
|
flexible
|
{
"blob_id": "24cd3a1a05a1cfa638b8264fd89b36ee63b29f89",
"index": 1625,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='CoreMLModules', version='0.1.0', url=\n 'https://github.com/AfricasVoices/CoreMLModules', packages=[\n 'core_ml_modules'], setup_requires=['pytest-runner'], install_requires=\n ['numpy', 'scikit-learn', 'nltk'], tests_require=['pytest<=3.6.4'])\n",
"step-3": "from setuptools import setup\nsetup(name='CoreMLModules', version='0.1.0', url=\n 'https://github.com/AfricasVoices/CoreMLModules', packages=[\n 'core_ml_modules'], setup_requires=['pytest-runner'], install_requires=\n ['numpy', 'scikit-learn', 'nltk'], tests_require=['pytest<=3.6.4'])\n",
"step-4": "from setuptools import setup\n\nsetup(\n name=\"CoreMLModules\",\n version=\"0.1.0\",\n url=\"https://github.com/AfricasVoices/CoreMLModules\",\n packages=[\"core_ml_modules\"],\n setup_requires=[\"pytest-runner\"],\n install_requires=[\"numpy\", \"scikit-learn\", \"nltk\"],\n tests_require=[\"pytest<=3.6.4\"]\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def replace_bold_tags(l=''):
    """Wrap a single __bold__ span in <strong> markup; other lines pass
    through unchanged."""
    matched = re.match('(.*)__(.*)__(.*)', l)
    if not matched:
        return l
    before, inner, after = matched.groups()
    return before + STRONG.format(inner) + after
<|reserved_special_token_0|>
def apply_p_tag_if_no_tag(l=''):
    """Wrap *l* in <p> unless it already starts with a block-level HTML tag."""
    if re.match('<h|<ul|<p|<li', l):
        return l
    return PARAGRAPH.format(l)


def check_if_list_item(l=''):
    """Return '<li>…</li>' for a '* item' line, or False when not a bullet."""
    matched = re.match(r'\* (.*)', l)
    return LIST_ITEM.format(matched.group(1)) if matched else False


def is_last_line(i, _list):
    """True when the first occurrence of *i* is the final element of *_list*.

    NOTE(review): list.index finds the first duplicate, so a repeated final
    line is reported as not-last; callers needing duplicate safety should
    compare positions instead.
    """
    last_position = len(_list) - 1
    return _list.index(i) == last_position
def parse(markdown):
    """Render a small markdown subset (headers, bold, italic, bullet lists,
    paragraphs) to HTML.

    Uses a positional "last line" check via enumerate: the original called
    is_last_line(i, lines), which relies on list.index and misfires when
    the final line duplicates an earlier one (e.g. repeated list items).
    """
    lines = markdown.split('\n')
    res = ''
    current_list = ''
    last = len(lines) - 1
    for idx, raw in enumerate(lines):
        line = replace_header_tags(raw)
        line = replace_bold_tags(line)
        line = replace_italic_tags(line)
        list_item = check_if_list_item(line)
        if list_item:
            current_list += list_item
            if idx == last:
                # Close a list that runs to the end of the document.
                res += U_LIST.format(current_list)
        else:
            if current_list:
                # A non-list line terminates any open list.
                res += U_LIST.format(current_list)
                current_list = ''
            res += apply_p_tag_if_no_tag(line)
    return res
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def replace_bold_tags(l=''):
    """Wrap a single __bold__ span in <strong> markup; other lines pass
    through unchanged."""
    matched = re.match('(.*)__(.*)__(.*)', l)
    if not matched:
        return l
    before, inner, after = matched.groups()
    return before + STRONG.format(inner) + after


def replace_italic_tags(l=''):
    """Wrap a single _italic_ span in <em> markup; other lines pass through
    unchanged."""
    matched = re.match('(.*)_(.*)_(.*)', l)
    if not matched:
        return l
    before, inner, after = matched.groups()
    return before + ITALIC.format(inner) + after


def apply_p_tag_if_no_tag(l=''):
    """Wrap *l* in <p> unless it already starts with a block-level HTML tag."""
    if re.match('<h|<ul|<p|<li', l):
        return l
    return PARAGRAPH.format(l)


def check_if_list_item(l=''):
    """Return '<li>…</li>' for a '* item' line, or False when not a bullet."""
    matched = re.match(r'\* (.*)', l)
    return LIST_ITEM.format(matched.group(1)) if matched else False


def is_last_line(i, _list):
    """True when the first occurrence of *i* is the final element of *_list*.

    NOTE(review): list.index finds the first duplicate, so a repeated final
    line is reported as not-last; callers needing duplicate safety should
    compare positions instead.
    """
    last_position = len(_list) - 1
    return _list.index(i) == last_position
def parse(markdown):
    """Render a small markdown subset (headers, bold, italic, bullet lists,
    paragraphs) to HTML.

    Uses a positional "last line" check via enumerate: the original called
    is_last_line(i, lines), which relies on list.index and misfires when
    the final line duplicates an earlier one (e.g. repeated list items).
    """
    lines = markdown.split('\n')
    res = ''
    current_list = ''
    last = len(lines) - 1
    for idx, raw in enumerate(lines):
        line = replace_header_tags(raw)
        line = replace_bold_tags(line)
        line = replace_italic_tags(line)
        list_item = check_if_list_item(line)
        if list_item:
            current_list += list_item
            if idx == last:
                # Close a list that runs to the end of the document.
                res += U_LIST.format(current_list)
        else:
            if current_list:
                # A non-list line terminates any open list.
                res += U_LIST.format(current_list)
                current_list = ''
            res += apply_p_tag_if_no_tag(line)
    return res
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# HTML fragment templates used by the markdown renderer below.
OPENING_TAG = '<{}>'
CLOSING_TAG = '</{}>'
U_LIST = '<ul>{}</ul>'
LIST_ITEM = '<li>{}</li>'
STRONG = '<strong>{}</strong>'
ITALIC = '<em>{}</em>'
PARAGRAPH = '<p>{}</p>'
# Header markers, longest first so '######' is tried before '#'.
# Fix: the h3 key was '###:' (trailing-colon typo), so '### title' lines
# matched no marker and were wrapped in <p> instead of <h3>.
HEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4',
                       '###': 'h3', '##': 'h2', '#': 'h1'})
def replace_header_tags(l=''):
    """Turn a leading markdown header marker ('#'..'######') into the
    matching <hN> tags."""
    for marker, tag in HEADERS.items():
        matched = re.match(f'{marker} (.*)', l)
        if matched:
            return OPENING_TAG.format(tag) + matched.group(1) + CLOSING_TAG.format(tag)
    return l


def replace_bold_tags(l=''):
    """Wrap a single __bold__ span in <strong> markup; other lines pass
    through unchanged."""
    matched = re.match('(.*)__(.*)__(.*)', l)
    if not matched:
        return l
    before, inner, after = matched.groups()
    return before + STRONG.format(inner) + after


def replace_italic_tags(l=''):
    """Wrap a single _italic_ span in <em> markup; other lines pass through
    unchanged."""
    matched = re.match('(.*)_(.*)_(.*)', l)
    if not matched:
        return l
    before, inner, after = matched.groups()
    return before + ITALIC.format(inner) + after


def apply_p_tag_if_no_tag(l=''):
    """Wrap *l* in <p> unless it already starts with a block-level HTML tag."""
    if re.match('<h|<ul|<p|<li', l):
        return l
    return PARAGRAPH.format(l)


def check_if_list_item(l=''):
    """Return '<li>…</li>' for a '* item' line, or False when not a bullet."""
    matched = re.match(r'\* (.*)', l)
    return LIST_ITEM.format(matched.group(1)) if matched else False


def is_last_line(i, _list):
    """True when the first occurrence of *i* is the final element of *_list*.

    NOTE(review): list.index finds the first duplicate, so a repeated final
    line is reported as not-last; callers needing duplicate safety should
    compare positions instead.
    """
    last_position = len(_list) - 1
    return _list.index(i) == last_position
def parse(markdown):
    """Render a small markdown subset (headers, bold, italic, bullet lists,
    paragraphs) to HTML.

    Uses a positional "last line" check via enumerate: the original called
    is_last_line(i, lines), which relies on list.index and misfires when
    the final line duplicates an earlier one (e.g. repeated list items).
    """
    lines = markdown.split('\n')
    res = ''
    current_list = ''
    last = len(lines) - 1
    for idx, raw in enumerate(lines):
        line = replace_header_tags(raw)
        line = replace_bold_tags(line)
        line = replace_italic_tags(line)
        list_item = check_if_list_item(line)
        if list_item:
            current_list += list_item
            if idx == last:
                # Close a list that runs to the end of the document.
                res += U_LIST.format(current_list)
        else:
            if current_list:
                # A non-list line terminates any open list.
                res += U_LIST.format(current_list)
                current_list = ''
            res += apply_p_tag_if_no_tag(line)
    return res
<|reserved_special_token_1|>
import re
from collections import OrderedDict
# HTML fragment templates used by the markdown renderer below.
OPENING_TAG = '<{}>'
CLOSING_TAG = '</{}>'
U_LIST = '<ul>{}</ul>'
LIST_ITEM = '<li>{}</li>'
STRONG = '<strong>{}</strong>'
ITALIC = '<em>{}</em>'
PARAGRAPH = '<p>{}</p>'
# Header markers, longest first so '######' is tried before '#'.
# Fix: the h3 key was '###:' (trailing-colon typo), so '### title' lines
# matched no marker and were wrapped in <p> instead of <h3>.
HEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4',
                       '###': 'h3', '##': 'h2', '#': 'h1'})
def replace_header_tags(l=''):
for k, v in HEADERS.items():
line_with_header = re.match(f'{k} (.*)', l)
if line_with_header:
rest_string = line_with_header.group(1)
return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)
return l
def replace_bold_tags(l=''):
line_with_bold = re.match('(.*)__(.*)__(.*)', l)
if line_with_bold:
return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)
) + line_with_bold.group(3)
return l
def replace_italic_tags(l=''):
line_with_ital = re.match('(.*)_(.*)_(.*)', l)
if line_with_ital:
return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)
) + line_with_ital.group(3)
return l
def apply_p_tag_if_no_tag(l=''):
return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)
def check_if_list_item(l=''):
    """Return *l* wrapped as an <li> when it is a '* ' bullet, else False."""
    bullet = re.match('\\* (.*)', l)
    if bullet is None:
        return False
    return LIST_ITEM.format(bullet.group(1))
def is_last_line(i, _list):
    """True when the *first* occurrence of *i* in *_list* is its final slot.

    NOTE(review): list.index finds the first match, so a value that also
    appears earlier in the list reports False even when it ends the list.
    """
    final_index = len(_list) - 1
    return _list.index(i) == final_index
def parse(markdown):
    """Render a small markdown subset (headers, __bold__, _italic_,
    '* ' bullet lists, paragraphs) to an HTML string."""
    lines = markdown.split('\n')
    res = ''
    current_list = ''
    last = len(lines) - 1
    for idx, raw in enumerate(lines):
        line = replace_header_tags(raw)
        line = replace_bold_tags(line)
        line = replace_italic_tags(line)
        list_item = check_if_list_item(line)
        if list_item:
            current_list += list_item
            # BUGFIX: is_last_line() relied on list.index(), i.e. the
            # *first* occurrence of the line text; a duplicated final line
            # therefore left the <ul> unterminated, and every call was an
            # O(n) scan.  The positional index is correct and O(1).
            if idx == last:
                res += U_LIST.format(current_list)
        else:
            if current_list:
                res += U_LIST.format(current_list)
                current_list = ''
            res += apply_p_tag_if_no_tag(line)
    return res
<|reserved_special_token_1|>
import re
from collections import OrderedDict
OPENING_TAG = '<{}>'
CLOSING_TAG = '</{}>'
U_LIST = '<ul>{}</ul>'
LIST_ITEM = '<li>{}</li>'
STRONG = '<strong>{}</strong>'
ITALIC = '<em>{}</em>'
PARAGRAPH = '<p>{}</p>'
# Header markers are ordered longest-first so '######' is tried before '#'.
# BUGFIX: the h3 marker was '###:' (typo), so '### title' never parsed as h3.
HEADERS = OrderedDict({'######': 'h6',
                       '#####': 'h5',
                       '####': 'h4',
                       '###': 'h3',
                       '##': 'h2',
                       '#': 'h1'})
def replace_header_tags(l=''):
    """Translate a leading markdown header marker into <hN>...</hN>."""
    for marker, tag in HEADERS.items():
        matched = re.match(f'{marker} (.*)', l)
        if matched is None:
            continue
        return OPENING_TAG.format(tag) + matched.group(1) + CLOSING_TAG.format(tag)
    return l
def replace_bold_tags(l=''):
    """Replace the first __text__ span in *l* with <strong>text</strong>."""
    hit = re.match('(.*)__(.*)__(.*)', l)
    if hit is None:
        return l
    prefix, emphasised, suffix = hit.groups()
    return prefix + STRONG.format(emphasised) + suffix
def replace_italic_tags(l=''):
    """Replace the first _text_ span in *l* with <em>text</em>."""
    hit = re.match('(.*)_(.*)_(.*)', l)
    if hit is None:
        return l
    prefix, emphasised, suffix = hit.groups()
    return prefix + ITALIC.format(emphasised) + suffix
def apply_p_tag_if_no_tag(l=''):
    """Wrap *l* in a paragraph tag unless it already carries a block tag."""
    has_block_tag = re.match('<h|<ul|<p|<li', l)
    if has_block_tag:
        return l
    return PARAGRAPH.format(l)
def check_if_list_item(l=''):
    """Return *l* wrapped as an <li> when it is a '* ' bullet, else False."""
    bullet = re.match(r'\* (.*)', l)
    return LIST_ITEM.format(bullet.group(1)) if bullet else False
def is_last_line(i, _list):
    """True when the *first* occurrence of *i* in *_list* is its final slot.

    NOTE(review): list.index returns the first match, so a repeated value
    reports False even when it also ends the list.
    """
    return _list.index(i) == len(_list) - 1
def parse(markdown):
    """Render a small markdown subset (headers, __bold__, _italic_,
    '* ' bullet lists, paragraphs) to an HTML string."""
    lines = markdown.split('\n')
    res = ''
    current_list = ''
    last = len(lines) - 1
    for idx, raw in enumerate(lines):
        line = replace_header_tags(raw)
        line = replace_bold_tags(line)
        line = replace_italic_tags(line)

        list_item = check_if_list_item(line)
        if list_item:
            current_list += list_item
            # BUGFIX: is_last_line() relied on list.index(), i.e. the
            # *first* occurrence of the line text; a duplicated final line
            # therefore left the <ul> unterminated, and every call was an
            # O(n) scan.  The positional index is correct and O(1).
            if idx == last:
                res += U_LIST.format(current_list)
        else:
            if current_list:
                res += U_LIST.format(current_list)
                current_list = ''
            res += apply_p_tag_if_no_tag(line)
    return res
|
flexible
|
{
"blob_id": "6b0b60ec571cf026d0f0cff3d9517362c16b459b",
"index": 6092,
"step-1": "<mask token>\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\n<mask token>\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-2": "<mask token>\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-3": "<mask token>\nOPENING_TAG = '<{}>'\nCLOSING_TAG = '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4', '###:':\n 'h3', '##': 'h2', '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k, v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-4": "import re\nfrom collections import OrderedDict\nOPENING_TAG = '<{}>'\nCLOSING_TAG = '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4', '###:':\n 'h3', '##': 'h2', '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k, v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-5": "import re\nfrom collections import OrderedDict\n\nOPENING_TAG = '<{}>'\nCLOSING_TAG= '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6',\n '#####': 'h5',\n '####': 'h4',\n '###:': 'h3',\n '##': 'h2',\n '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k,v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + \\\n STRONG.format(line_with_bold.group(2)) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + \\\n ITALIC.format(line_with_ital.group(2)) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match(r'\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
# -*- coding: utf-8 -
#
# This file is part of gaffer. See the NOTICE for more information.
import os
from .base import Command
from ...httpclient import Server
class Load(Command):
    """\
    Load a Procfile application to gafferd
    ======================================

    This command allows you to load your Procfile application
    in gafferd.

    Command line
    ------------

    $ gaffer load [name] [url]

    Arguments
    +++++++++

    *name* is the name of the group of process recoreded in gafferd.
    By default it will be the name of your project folder.You can use
    ``.`` to specify the current folder.

    *uri* is the url to connect to a gaffer node. By default
    'http://127.0.0.1:5000'

    Options
    +++++++

    **--endpoint**

        Gaffer node URL to connect.

    """

    name = "load"

    def run(self, procfile, pargs):
        """Register every process of *procfile* on a gaffer node.

        procfile -- parsed Procfile (provides processes(), env, root, ...)
        pargs    -- parsed command-line arguments (args, endpoint, ...)
        """
        args = pargs.args

        # positional arguments: [group [uri]]
        uri = None
        if len(args) == 2:
            group = args[0]
            uri = args[1]
        elif len(args) == 1:
            group = args[0]
        else:
            group = "."

        # --endpoint takes precedence over the positional uri
        if pargs.endpoint:
            uri = pargs.endpoint

        if not uri:
            uri = "http://127.0.0.1:5000"

        # "." means: derive the group name from the Procfile location
        if group == ".":
            group = procfile.get_groupname()

        # create a server instance
        s = Server(uri)

        # resolve conflicts with groups already registered on the node
        group = self.find_groupname(group, s)

        # parse the concurrency settings
        concurrency = self.parse_concurrency(pargs)

        # finally send the processes
        for name, cmd_str in procfile.processes():
            cmd, args = procfile.parse_cmd(cmd_str)
            pname = "%s:%s" % (group, name)
            params = dict(args=args, env=procfile.env,
                    numprocesses=concurrency.get(name, 1),
                    redirect_output=['out', 'err'],
                    cwd=os.path.abspath(procfile.root))
            s.add_process(pname, cmd, **params)
        print("%r has been loaded in %s" % (group, uri))

    def find_groupname(self, g, s):
        """Return *g*, or a derived name ("g.0", "g.1", ...) that is not
        yet registered on the server *s*.

        Raises RuntimeError when no free name is found after a few
        attempts (the server may be registering groups concurrently).
        """
        base = g
        tries = 0
        while True:
            groups = s.groups()
            if g not in groups:
                return g

            if tries > 3:
                # BUGFIX: the original raise was missing the format
                # argument, so the message contained a literal "%r".
                raise RuntimeError(
                    "%r is conflicting, try to pass a new one" % base)

            # BUGFIX: the original built each candidate from the previous
            # one ("g.0.0.0...") and never incremented i; derive every
            # candidate from the base name with an increasing suffix.
            i = 0
            while True:
                g = "%s.%s" % (base, i)
                if g not in groups:
                    break
                i += 1
            tries += 1
|
normal
|
{
"blob_id": "eb5256543d6095668d6eeaf6cfdc9f744d7c73c5",
"index": 2267,
"step-1": "<mask token>\n\n\nclass Load(Command):\n <mask token>\n <mask token>\n <mask token>\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n",
"step-2": "<mask token>\n\n\nclass Load(Command):\n <mask token>\n <mask token>\n\n def run(self, procfile, pargs):\n args = pargs.args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = '.'\n if pargs.endpoint:\n uri = pargs.endpoint\n if not uri:\n uri = 'http://127.0.0.1:5000'\n if group == '.':\n group = procfile.get_groupname()\n s = Server(uri)\n group = self.find_groupname(group, s)\n concurrency = self.parse_concurrency(pargs)\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n pname = '%s:%s' % (group, name)\n params = dict(args=args, env=procfile.env, numprocesses=\n concurrency.get(name, 1), redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print('%r has been loaded in %s' % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n",
"step-3": "<mask token>\n\n\nclass Load(Command):\n \"\"\" Load a Procfile application to gafferd\n ======================================\n\n This command allows you to load your Procfile application\n in gafferd.\n\n Command line\n ------------\n\n $ gaffer load [name] [url]\n\n Arguments\n +++++++++\n\n *name* is the name of the group of process recoreded in gafferd.\n By default it will be the name of your project folder.You can use\n ``.`` to specify the current folder.\n\n *uri* is the url to connect to a gaffer node. By default\n 'http://127.0.0.1:5000'\n\n Options\n +++++++\n\n **--endpoint**\n\n Gaffer node URL to connect.\n\n \"\"\"\n name = 'load'\n\n def run(self, procfile, pargs):\n args = pargs.args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = '.'\n if pargs.endpoint:\n uri = pargs.endpoint\n if not uri:\n uri = 'http://127.0.0.1:5000'\n if group == '.':\n group = procfile.get_groupname()\n s = Server(uri)\n group = self.find_groupname(group, s)\n concurrency = self.parse_concurrency(pargs)\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n pname = '%s:%s' % (group, name)\n params = dict(args=args, env=procfile.env, numprocesses=\n concurrency.get(name, 1), redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print('%r has been loaded in %s' % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n",
"step-4": "import os\nfrom .base import Command\nfrom ...httpclient import Server\n\n\nclass Load(Command):\n \"\"\" Load a Procfile application to gafferd\n ======================================\n\n This command allows you to load your Procfile application\n in gafferd.\n\n Command line\n ------------\n\n $ gaffer load [name] [url]\n\n Arguments\n +++++++++\n\n *name* is the name of the group of process recoreded in gafferd.\n By default it will be the name of your project folder.You can use\n ``.`` to specify the current folder.\n\n *uri* is the url to connect to a gaffer node. By default\n 'http://127.0.0.1:5000'\n\n Options\n +++++++\n\n **--endpoint**\n\n Gaffer node URL to connect.\n\n \"\"\"\n name = 'load'\n\n def run(self, procfile, pargs):\n args = pargs.args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = '.'\n if pargs.endpoint:\n uri = pargs.endpoint\n if not uri:\n uri = 'http://127.0.0.1:5000'\n if group == '.':\n group = procfile.get_groupname()\n s = Server(uri)\n group = self.find_groupname(group, s)\n concurrency = self.parse_concurrency(pargs)\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n pname = '%s:%s' % (group, name)\n params = dict(args=args, env=procfile.env, numprocesses=\n concurrency.get(name, 1), redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print('%r has been loaded in %s' % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n if tries > 3:\n raise RuntimeError('%r is conflicting, try to pass a new one')\n i = 0\n while True:\n g = '%s.%s' % (g, i)\n if g not in groups:\n break\n tries += 1\n",
"step-5": "# -*- coding: utf-8 -\n#\n# This file is part of gaffer. See the NOTICE for more information.\n\nimport os\n\nfrom .base import Command\nfrom ...httpclient import Server\n\nclass Load(Command):\n \"\"\"\\\n Load a Procfile application to gafferd\n ======================================\n\n This command allows you to load your Procfile application\n in gafferd.\n\n Command line\n ------------\n\n $ gaffer load [name] [url]\n\n Arguments\n +++++++++\n\n *name* is the name of the group of process recoreded in gafferd.\n By default it will be the name of your project folder.You can use\n ``.`` to specify the current folder.\n\n *uri* is the url to connect to a gaffer node. By default\n 'http://127.0.0.1:5000'\n\n Options\n +++++++\n\n **--endpoint**\n\n Gaffer node URL to connect.\n\n \"\"\"\n\n name = \"load\"\n\n def run(self, procfile, pargs):\n args = pargs.args\n\n # get args\n uri = None\n if len(args) == 2:\n group = args[0]\n uri = args[1]\n elif len(args) == 1:\n group = args[0]\n else:\n group = \".\"\n\n if pargs.endpoint:\n uri = pargs.endpoint\n\n if not uri:\n uri = \"http://127.0.0.1:5000\"\n\n # get the default groupname\n if group == \".\":\n group = procfile.get_groupname()\n\n # create a server instance\n s = Server(uri)\n\n # finally manage group conflicts\n group = self.find_groupname(group, s)\n\n # parse the concurrency settings\n concurrency = self.parse_concurrency(pargs)\n\n # finally send the processes\n for name, cmd_str in procfile.processes():\n cmd, args = procfile.parse_cmd(cmd_str)\n\n pname = \"%s:%s\" % (group, name)\n params = dict(args=args, env=procfile.env,\n numprocesses=concurrency.get(name, 1),\n redirect_output=['out', 'err'],\n cwd=os.path.abspath(procfile.root))\n s.add_process(pname, cmd, **params)\n print(\"%r has been loaded in %s\" % (group, uri))\n\n def find_groupname(self, g, s):\n tries = 0\n while True:\n groups = s.groups()\n if g not in groups:\n return g\n\n if tries > 3:\n raise RuntimeError(\"%r is 
conflicting, try to pass a new one\")\n\n i = 0\n while True:\n g = \"%s.%s\" % (g, i)\n if g not in groups:\n break\n tries += 1\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import os
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data as td
import torchvision as tv
import pandas as pd
from PIL import Image
from matplotlib import pyplot as plt
from utils import imshow, NNRegressor
class DnCNN(NNRegressor):
    """Plain DnCNN: D hidden conv+BN+ReLU stages plus a global residual
    connection (the network predicts the noise added to the input)."""

    def __init__(self, D, C=64):
        # D: number of hidden stages; C: channel width of each stage.
        super(DnCNN, self).__init__()
        self.D = D

        # Conv stack: 3 -> C, D stages of C -> C, then C -> 3.
        layers = [nn.Conv2d(3, C, 3, padding=1)]
        layers += [nn.Conv2d(C, C, 3, padding=1) for _ in range(D)]
        layers += [nn.Conv2d(C, 3, 3, padding=1)]
        self.conv = nn.ModuleList(layers)

        # He initialization on every conv except the output layer.
        for layer in self.conv[:-1]:
            nn.init.kaiming_normal_(layer.weight.data, nonlinearity='relu')

        # One BatchNorm per hidden stage, weights set to 1.25 * sqrt(C).
        self.bn = nn.ModuleList([nn.BatchNorm2d(C, C) for _ in range(D)])
        for norm in self.bn:
            nn.init.constant_(norm.weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        """Return the denoised image: input plus the predicted residual."""
        h = F.relu(self.conv[0](x))
        for i in range(self.D):
            h = F.relu(self.bn[i](self.conv[i + 1](h)))
        return self.conv[self.D + 1](h) + x
class UDnCNN(NNRegressor):
    """U-shaped DnCNN: the middle of the conv stack runs at reduced
    resolution via max-pool / max-unpool, with skip connections between
    the matching down/up stages (merged with a 1/sqrt(2) scaling)."""

    def __init__(self, D, C=64):
        # D: number of hidden conv+BN stages; C: channel width.
        super(UDnCNN, self).__init__()
        self.D = D

        # convolution layers: 3 -> C, D stages of C -> C, then C -> 3
        self.conv = nn.ModuleList()
        self.conv.append(nn.Conv2d(3, C, 3, padding=1))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])
        self.conv.append(nn.Conv2d(C, 3, 3, padding=1))
        # apply He's initialization to every conv except the output layer
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')

        # batch normalization (one per hidden stage)
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        """Return the denoised image: x plus the predicted residual.

        NOTE(review): presumably H and W must be divisible by
        2**(D//2 - 1) so every max-pool/unpool pair round-trips exactly —
        confirm against the data pipeline.
        """
        D = self.D
        h = F.relu(self.conv[0](x))
        # Buffers for the skip connections and for inverting the pooling.
        h_buff = []
        idx_buff = []
        shape_buff = []
        # Encoder: conv+BN+ReLU then 2x2 max-pool (indices kept for unpool).
        for i in range(D//2-1):
            shape_buff.append(h.shape)
            h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i+1](h))),
                                  kernel_size=(2, 2), return_indices=True)
            h_buff.append(h)
            idx_buff.append(idx)
        # Bottleneck: two stages at the lowest resolution.
        for i in range(D//2-1, D//2+1):
            h = F.relu(self.bn[i](self.conv[i+1](h)))
        # Decoder: merge the mirrored encoder activation (scaled by
        # 1/sqrt(2)), then invert the corresponding max-pool.
        for i in range(D//2+1, D):
            j = i - (D // 2 + 1) + 1
            h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i+1]((h+h_buff[-j])/np.sqrt(2)))),
                               idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j])
        y = self.conv[D+1](h) + x
        return y
class DUDnCNN(NNRegressor):
    """Dilated U-DnCNN: replaces UDnCNN's pool/unpool pairs with dilated
    convolutions, keeping the enlarged receptive field without ever
    reducing the spatial resolution."""

    def __init__(self, D, C=64):
        # D: number of hidden conv+BN stages; C: channel width.
        super(DUDnCNN, self).__init__()
        self.D = D
        # compute k(max_pool) and l(max_unpool): for each stage, how many
        # pools / unpools would precede it in the equivalent U-shaped net
        k = [0]
        k.extend([i for i in range(D//2)])
        k.extend([k[-1] for _ in range(D//2, D+1)])
        l = [0 for _ in range(D//2+1)]
        l.extend([i for i in range(D+1-(D//2+1))])
        l.append(l[-1])
        # holes and dilations for convolution layers: dilation 2**(k-l)
        # emulates the downsampling factor each stage would otherwise see
        holes = [2**(kl[0]-kl[1])-1 for kl in zip(k, l)]
        dilations = [i+1 for i in holes]
        # convolution layers (padding == dilation keeps spatial size fixed)
        self.conv = nn.ModuleList()
        self.conv.append(
            nn.Conv2d(3, C, 3, padding=dilations[0], dilation=dilations[0]))
        self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i+1],
            dilation=dilations[i+1]) for i in range(D)])
        self.conv.append(
            nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation=dilations[-1]))
        # apply He's initialization to every conv except the output layer
        for i in range(len(self.conv[:-1])):
            nn.init.kaiming_normal_(
                self.conv[i].weight.data, nonlinearity='relu')

        # batch normalization (one per hidden stage)
        self.bn = nn.ModuleList()
        self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])
        # initialize the weights of the Batch normalization layers
        for i in range(D):
            nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))

    def forward(self, x):
        """Return the denoised image: x plus the predicted residual.

        NOTE(review): torch.backends.cudnn.benchmark is toggled on only
        for the dilated convs — presumably a workaround for slow cudnn
        algorithm selection on dilated convolutions; confirm whether this
        global toggle is still needed (it is not thread-safe).
        """
        D = self.D
        h = F.relu(self.conv[0](x))
        # Activations of the "encoder" stages, reused as skip connections.
        h_buff = []

        for i in range(D//2 - 1):
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1](h)
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))
            h_buff.append(h)

        # Bottleneck: two stages without skip merging.
        for i in range(D//2 - 1, D//2 + 1):
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1](h)
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))

        # "Decoder" stages: merge the mirrored encoder activation,
        # scaled by 1/sqrt(2).
        for i in range(D//2 + 1, D):
            j = i - (D//2 + 1) + 1
            torch.backends.cudnn.benchmark = True
            h = self.conv[i+1]((h + h_buff[-j]) / np.sqrt(2))
            torch.backends.cudnn.benchmark = False
            h = F.relu(self.bn[i](h))

        y = self.conv[D+1](h) + x
        return y
|
normal
|
{
"blob_id": "9c60d82d42716abb036dc7297a2dca66f0508984",
"index": 7626,
"step-1": "<mask token>\n\n\nclass UDnCNN(NNRegressor):\n <mask token>\n <mask token>\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n",
"step-2": "<mask token>\n\n\nclass DnCNN(NNRegressor):\n <mask token>\n <mask token>\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in 
range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n",
"step-3": "<mask token>\n\n\nclass DnCNN(NNRegressor):\n <mask token>\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, 
C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n",
"step-4": "import os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.utils.data as td\nimport torchvision as tv\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom utils import imshow, NNRegressor\n\n\nclass DnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D // 2 - 1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i + 1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in 
range(D // 2 - 1, D // 2 + 1):\n h = F.relu(self.bn[i](self.conv[i + 1](h)))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i + 1]((h +\n h_buff[-j]) / np.sqrt(2)))), idx_buff[-j], kernel_size=(2, \n 2), output_size=shape_buff[-j])\n y = self.conv[D + 1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n k = [0]\n k.extend([i for i in range(D // 2)])\n k.extend([k[-1] for _ in range(D // 2, D + 1)])\n l = [(0) for _ in range(D // 2 + 1)]\n l.extend([i for i in range(D + 1 - (D // 2 + 1))])\n l.append(l[-1])\n holes = [(2 ** (kl[0] - kl[1]) - 1) for kl in zip(k, l)]\n dilations = [(i + 1) for i in holes]\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=dilations[0], dilation=\n dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i + 1],\n dilation=dilations[i + 1]) for i in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation\n =dilations[-1]))\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity=\n 'relu')\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n for i in range(D // 2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n for i in range(D // 2 - 1, D // 2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n for i in range(D // 2 + 1, D):\n j = i - (D // 2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i + 1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = 
False\n h = F.relu(self.bn[i](h))\n y = self.conv[D + 1](h) + x\n return y\n",
"step-5": "import os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.utils.data as td\nimport torchvision as tv\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom utils import imshow, NNRegressor\n\n\nclass DnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DnCNN, self).__init__()\n self.D = D\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n for i in range(D):\n h = F.relu(self.bn[i](self.conv[i+1](h)))\n y = self.conv[D+1](h) + x\n return y\n\n\nclass UDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(UDnCNN, self).__init__()\n self.D = D\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(nn.Conv2d(3, C, 3, padding=1))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=1) for _ in range(D)])\n self.conv.append(nn.Conv2d(C, 3, 3, padding=1))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = 
F.relu(self.conv[0](x))\n h_buff = []\n idx_buff = []\n shape_buff = []\n for i in range(D//2-1):\n shape_buff.append(h.shape)\n h, idx = F.max_pool2d(F.relu(self.bn[i](self.conv[i+1](h))),\n kernel_size=(2, 2), return_indices=True)\n h_buff.append(h)\n idx_buff.append(idx)\n for i in range(D//2-1, D//2+1):\n h = F.relu(self.bn[i](self.conv[i+1](h)))\n for i in range(D//2+1, D):\n j = i - (D // 2 + 1) + 1\n h = F.max_unpool2d(F.relu(self.bn[i](self.conv[i+1]((h+h_buff[-j])/np.sqrt(2)))),\n idx_buff[-j], kernel_size=(2, 2), output_size=shape_buff[-j])\n y = self.conv[D+1](h) + x\n return y\n\n\nclass DUDnCNN(NNRegressor):\n\n def __init__(self, D, C=64):\n super(DUDnCNN, self).__init__()\n self.D = D\n\n # compute k(max_pool) and l(max_unpool)\n k = [0]\n k.extend([i for i in range(D//2)])\n k.extend([k[-1] for _ in range(D//2, D+1)])\n l = [0 for _ in range(D//2+1)]\n l.extend([i for i in range(D+1-(D//2+1))])\n l.append(l[-1])\n\n # holes and dilations for convolution layers\n holes = [2**(kl[0]-kl[1])-1 for kl in zip(k, l)]\n dilations = [i+1 for i in holes]\n\n # convolution layers\n self.conv = nn.ModuleList()\n self.conv.append(\n nn.Conv2d(3, C, 3, padding=dilations[0], dilation=dilations[0]))\n self.conv.extend([nn.Conv2d(C, C, 3, padding=dilations[i+1],\n dilation=dilations[i+1]) for i in range(D)])\n self.conv.append(\n nn.Conv2d(C, 3, 3, padding=dilations[-1], dilation=dilations[-1]))\n # apply He's initialization\n for i in range(len(self.conv[:-1])):\n nn.init.kaiming_normal_(\n self.conv[i].weight.data, nonlinearity='relu')\n\n # batch normalization\n self.bn = nn.ModuleList()\n self.bn.extend([nn.BatchNorm2d(C, C) for _ in range(D)])\n # initialize the weights of the Batch normalization layers\n for i in range(D):\n nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(C))\n\n def forward(self, x):\n D = self.D\n h = F.relu(self.conv[0](x))\n h_buff = []\n\n for i in range(D//2 - 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1](h)\n 
torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n h_buff.append(h)\n\n for i in range(D//2 - 1, D//2 + 1):\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1](h)\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n\n for i in range(D//2 + 1, D):\n j = i - (D//2 + 1) + 1\n torch.backends.cudnn.benchmark = True\n h = self.conv[i+1]((h + h_buff[-j]) / np.sqrt(2))\n torch.backends.cudnn.benchmark = False\n h = F.relu(self.bn[i](h))\n\n y = self.conv[D+1](h) + x\n return y\n",
"step-ids": [
4,
7,
8,
10,
11
]
}
|
[
4,
7,
8,
10,
11
] |
import datetime
class Schedule:
def __init__(self, start, end, name, other): # Constructor
self.start = self.str_convert(start) # Schedule start time (ex. 9:00)
self.end = self.str_convert(end) # Schedule end time (ex. 22:00)
self.name = name # Schedule name (ex. member name, final schedule, etc)
self.other = other # Schedule exceptions/"other"
self.array = self.create_array() # Schedule array (2D array of days of week (7) x half hour blocks)
def str_convert(self, str_time):
# Converts start/end time to datettime if entered as string
if isinstance(str_time, str):
str_time = datetime.datetime.strptime(str_time, '%H:%M')
return datetime.time(str_time.hour, str_time.minute)
return str_time
def create_array(self):
# Generate array from number of (30 minute) blocks
num_blocks = self.calculate_num_blocks(self.start, self.end)
return [[True for x in range(num_blocks)] for y in range(7)]
@staticmethod
def calculate_num_blocks(start, end):
# Determining size of array: get difference
total_hrs = end.hour - start.hour
total_mins = end.minute - start.minute
# Determining size of array: in 30 min blocks (rounded)
num_half_hr = int(total_mins/30)
num_blocks = 2 * total_hrs + num_half_hr
return num_blocks
# def get_time
def prep_visualize(self):
# Banner
print("\n######### VISUALIZING WEEK: " + self.name + " #########")
print(self.start, "-", self.end, "\n")
num_blocks = self.calculate_num_blocks(self.start, self.end)
days = ["S", "M", "T", "W", "R", "F", "S" ]
times = []
# Fill times column (from datetime obj)
# Convert to datetime.datetime object, add timedelta, convert back - arbitrary datetime.date(1, 1, 1)
dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)
for i in range(num_blocks):
num_blocks_i = datetime.timedelta(minutes=30*i)
combined = (dtdt + num_blocks_i).time()
times.append(combined.strftime("%H:%M"))
return days, times
def visualize(self):
days, times = self.prep_visualize()
# HEADER:
print("#####", end=" ")
for d in days: print("(" + d + ") ", end="")
print("#####")
# SCHEDULE:
for t in range(len(times)):
print(times[t], end=" ")
for d in range(7):
slot = self.array[d][t]
if slot is True: slot = " "
elif slot is False: slot = " x "
print(slot, end=" ")
print(times[t])
print()
def print_other(self):
print(self.name + "\t ", self.other.replace("\n", "; "))
class ExSchedule(Schedule):
def __init__(self, start, end, num_members, list_membs):
Schedule.__init__(self, start, end, "ExSched", None)
self.num_members = num_members
self.list_membs = list_membs
self.exarray = self.create_exarray()
def create_exarray(self):
num_blocks = Schedule.calculate_num_blocks(self.start, self.end)
return [[[True for z in range(self.num_members)] for x in range(num_blocks)] for y in range(7)]
def visualize(self):
days, times = Schedule.prep_visualize(self)
print("Members: "+ self.list_membs[:-2])
# HEADER:
print("##### ", end="")
# print(days)
# print(times)
for d in days:
num_spaces = len(self.exarray[0][1]) - 1
left_half = int(num_spaces / 2)
right_half = num_spaces - left_half
print("(", end="")
print(''.join([" " for x in range(left_half)]), end=d)
print(''.join([" " for x in range(right_half)]), end=")")
print(" #####")
# SCHEDULE:
for i in range(len(times)): # i: 0-26 (9:00) = m: 0-26 ([T,T,T])
print(times[i], end=" ")
for d in range(len(self.exarray)): # d: 0-6 (sun)
array = self.exarray[d][i]
print("[", end="")
for memb_avail in array:
print("-", end="") if memb_avail is True else print("*", end="")
print("]", end="")
print(" ", end=times[i]+"\n")
|
normal
|
{
"blob_id": "f56978d5738c2f8cb4ed5ce4f11d3aae6a9689b1",
"index": 4604,
"step-1": "<mask token>\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n <mask token>\n <mask token>\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n <mask token>\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n 
print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-2": "<mask token>\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n <mask token>\n <mask token>\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n\n def print_other(self):\n print(self.name + '\\t ', self.other.replace('\\n', '; '))\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = 
Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-3": "<mask token>\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n\n def str_convert(self, str_time):\n if isinstance(str_time, str):\n str_time = datetime.datetime.strptime(str_time, '%H:%M')\n return datetime.time(str_time.hour, str_time.minute)\n return str_time\n\n def create_array(self):\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n return [[(True) for x in range(num_blocks)] for y in range(7)]\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n\n def print_other(self):\n print(self.name + '\\t ', self.other.replace('\\n', '; '))\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = 
num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-4": "import datetime\n\n\nclass Schedule:\n\n def __init__(self, start, end, name, other):\n self.start = self.str_convert(start)\n self.end = self.str_convert(end)\n self.name = name\n self.other = other\n self.array = self.create_array()\n\n def str_convert(self, str_time):\n if isinstance(str_time, str):\n str_time = datetime.datetime.strptime(str_time, '%H:%M')\n return datetime.time(str_time.hour, str_time.minute)\n return str_time\n\n def create_array(self):\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n return [[(True) for x in range(num_blocks)] for y in range(7)]\n\n @staticmethod\n def calculate_num_blocks(start, end):\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n num_half_hr = int(total_mins / 30)\n num_blocks = 2 * total_hrs + num_half_hr\n return num_blocks\n\n def prep_visualize(self):\n print('\\n######### VISUALIZING WEEK: ' + self.name + ' #########')\n print(self.start, '-', self.end, '\\n')\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = ['S', 'M', 'T', 'W', 'R', 'F', 'S']\n times = []\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30 * i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime('%H:%M'))\n return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n print('#####', end=' ')\n for d in days:\n print('(' + d + ') ', end='')\n print('#####')\n for t in range(len(times)):\n print(times[t], end=' ')\n for d in range(7):\n slot = self.array[d][t]\n if slot is True:\n slot = ' '\n elif slot is False:\n slot = ' x '\n print(slot, end=' ')\n print(times[t])\n print()\n\n def print_other(self):\n print(self.name + '\\t ', self.other.replace('\\n', '; '))\n\n\nclass ExSchedule(Schedule):\n\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, 'ExSched', None)\n self.num_members = 
num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[(True) for z in range(self.num_members)] for x in range(\n num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print('Members: ' + self.list_membs[:-2])\n print('##### ', end='')\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n print('(', end='')\n print(''.join([' ' for x in range(left_half)]), end=d)\n print(''.join([' ' for x in range(right_half)]), end=')')\n print(' #####')\n for i in range(len(times)):\n print(times[i], end=' ')\n for d in range(len(self.exarray)):\n array = self.exarray[d][i]\n print('[', end='')\n for memb_avail in array:\n print('-', end='') if memb_avail is True else print('*',\n end='')\n print(']', end='')\n print(' ', end=times[i] + '\\n')\n",
"step-5": "import datetime\n\nclass Schedule:\n def __init__(self, start, end, name, other): # Constructor\n self.start = self.str_convert(start) # Schedule start time (ex. 9:00)\n self.end = self.str_convert(end) # Schedule end time (ex. 22:00)\n self.name = name # Schedule name (ex. member name, final schedule, etc)\n self.other = other # Schedule exceptions/\"other\"\n self.array = self.create_array() # Schedule array (2D array of days of week (7) x half hour blocks)\n\n def str_convert(self, str_time):\n # Converts start/end time to datettime if entered as string\n if isinstance(str_time, str):\n str_time = datetime.datetime.strptime(str_time, '%H:%M')\n return datetime.time(str_time.hour, str_time.minute)\n return str_time\n\n def create_array(self):\n # Generate array from number of (30 minute) blocks\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n return [[True for x in range(num_blocks)] for y in range(7)]\n\n @staticmethod\n def calculate_num_blocks(start, end):\n # Determining size of array: get difference\n total_hrs = end.hour - start.hour\n total_mins = end.minute - start.minute\n\n # Determining size of array: in 30 min blocks (rounded)\n num_half_hr = int(total_mins/30)\n num_blocks = 2 * total_hrs + num_half_hr\n\n return num_blocks\n\n # def get_time\n\n def prep_visualize(self):\n # Banner\n print(\"\\n######### VISUALIZING WEEK: \" + self.name + \" #########\")\n print(self.start, \"-\", self.end, \"\\n\")\n\n num_blocks = self.calculate_num_blocks(self.start, self.end)\n days = [\"S\", \"M\", \"T\", \"W\", \"R\", \"F\", \"S\" ]\n times = []\n\n # Fill times column (from datetime obj)\n # Convert to datetime.datetime object, add timedelta, convert back - arbitrary datetime.date(1, 1, 1)\n dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)\n for i in range(num_blocks):\n num_blocks_i = datetime.timedelta(minutes=30*i)\n combined = (dtdt + num_blocks_i).time()\n times.append(combined.strftime(\"%H:%M\"))\n\n 
return days, times\n\n def visualize(self):\n days, times = self.prep_visualize()\n\n # HEADER:\n print(\"#####\", end=\" \")\n for d in days: print(\"(\" + d + \") \", end=\"\")\n print(\"#####\")\n\n # SCHEDULE:\n for t in range(len(times)):\n print(times[t], end=\" \")\n for d in range(7):\n slot = self.array[d][t]\n if slot is True: slot = \" \"\n elif slot is False: slot = \" x \"\n print(slot, end=\" \")\n print(times[t])\n print()\n\n def print_other(self): \n print(self.name + \"\\t \", self.other.replace(\"\\n\", \"; \"))\n\n\nclass ExSchedule(Schedule):\n def __init__(self, start, end, num_members, list_membs):\n Schedule.__init__(self, start, end, \"ExSched\", None)\n self.num_members = num_members\n self.list_membs = list_membs\n self.exarray = self.create_exarray()\n\n def create_exarray(self):\n num_blocks = Schedule.calculate_num_blocks(self.start, self.end)\n return [[[True for z in range(self.num_members)] for x in range(num_blocks)] for y in range(7)]\n\n def visualize(self):\n days, times = Schedule.prep_visualize(self)\n print(\"Members: \"+ self.list_membs[:-2])\n\n # HEADER:\n print(\"##### \", end=\"\")\n # print(days)\n # print(times)\n for d in days:\n num_spaces = len(self.exarray[0][1]) - 1\n left_half = int(num_spaces / 2)\n right_half = num_spaces - left_half\n\n print(\"(\", end=\"\")\n print(''.join([\" \" for x in range(left_half)]), end=d)\n print(''.join([\" \" for x in range(right_half)]), end=\")\")\n print(\" #####\")\n\n # SCHEDULE:\n for i in range(len(times)): # i: 0-26 (9:00) = m: 0-26 ([T,T,T])\n print(times[i], end=\" \")\n for d in range(len(self.exarray)): # d: 0-6 (sun)\n array = self.exarray[d][i]\n print(\"[\", end=\"\")\n for memb_avail in array:\n print(\"-\", end=\"\") if memb_avail is True else print(\"*\", end=\"\")\n print(\"]\", end=\"\")\n print(\" \", end=times[i]+\"\\n\")\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
from .feature import slide_show
def main(args=None):
if args:
slide_show(args[0])
|
normal
|
{
"blob_id": "8680c033662a89ed6fc73e65ec544b93558c4208",
"index": 688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args=None):\n if args:\n slide_show(args[0])\n",
"step-3": "from .feature import slide_show\n\n\ndef main(args=None):\n if args:\n slide_show(args[0])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class predict_guitar:
<|reserved_special_token_0|>
def softmax(self, vector):
"""Softmax function for calculating probs"""
e = np.exp(vector)
return e / e.sum()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class predict_guitar:
<|reserved_special_token_0|>
def softmax(self, vector):
"""Softmax function for calculating probs"""
e = np.exp(vector)
return e / e.sum()
def predict(self, url):
"""Generating prediction of image url"""
response = requests.get(url)
img = Image.open(BytesIO(response.content))
transform = transforms.Compose([transforms.Grayscale(), transforms.
Resize((128, 128)), transforms.ToTensor()])
img = transform(img).unsqueeze(0)
if torch.cuda.is_available():
img = img.cuda()
out = self.model(img)
classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',
'Stratocaster', 'Telecaster']
if torch.cuda.is_available():
logs = out.cpu().data.numpy()
else:
logs = out.data.numpy()
return [classes[logs.argmax()]]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class predict_guitar:
def __init__(self):
"""Model is loaded on init of the class"""
self.model = Net()
if torch.cuda.is_available():
map_location = torch.device('cuda')
else:
map_location = torch.device('cpu')
self.model.load_state_dict(torch.load('model.pt', map_location=
map_location))
if torch.cuda.is_available():
self.model.cuda()
else:
self.model.cpu()
self.model.eval()
def softmax(self, vector):
"""Softmax function for calculating probs"""
e = np.exp(vector)
return e / e.sum()
def predict(self, url):
"""Generating prediction of image url"""
response = requests.get(url)
img = Image.open(BytesIO(response.content))
transform = transforms.Compose([transforms.Grayscale(), transforms.
Resize((128, 128)), transforms.ToTensor()])
img = transform(img).unsqueeze(0)
if torch.cuda.is_available():
img = img.cuda()
out = self.model(img)
classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',
'Stratocaster', 'Telecaster']
if torch.cuda.is_available():
logs = out.cpu().data.numpy()
else:
logs = out.data.numpy()
return [classes[logs.argmax()]]
<|reserved_special_token_1|>
import torch
from torchvision import datasets, transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
import requests
from io import BytesIO
from net import Net
class predict_guitar:
def __init__(self):
"""Model is loaded on init of the class"""
self.model = Net()
if torch.cuda.is_available():
map_location = torch.device('cuda')
else:
map_location = torch.device('cpu')
self.model.load_state_dict(torch.load('model.pt', map_location=
map_location))
if torch.cuda.is_available():
self.model.cuda()
else:
self.model.cpu()
self.model.eval()
def softmax(self, vector):
"""Softmax function for calculating probs"""
e = np.exp(vector)
return e / e.sum()
def predict(self, url):
"""Generating prediction of image url"""
response = requests.get(url)
img = Image.open(BytesIO(response.content))
transform = transforms.Compose([transforms.Grayscale(), transforms.
Resize((128, 128)), transforms.ToTensor()])
img = transform(img).unsqueeze(0)
if torch.cuda.is_available():
img = img.cuda()
out = self.model(img)
classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',
'Stratocaster', 'Telecaster']
if torch.cuda.is_available():
logs = out.cpu().data.numpy()
else:
logs = out.data.numpy()
return [classes[logs.argmax()]]
<|reserved_special_token_1|>
import torch
from torchvision import datasets, transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
import requests
from io import BytesIO
from net import Net
class predict_guitar:
    """Guitar-image classifier: restores the trained net from ``model.pt``
    and labels images fetched over HTTP."""

    def __init__(self):
        """Model is loaded on init of the class"""
        self.model = Net()
        use_gpu = torch.cuda.is_available()
        map_location = torch.device('cuda' if use_gpu else 'cpu')
        # restore the trained parameters onto the available device
        state = torch.load('model.pt', map_location=map_location)
        self.model.load_state_dict(state)
        if use_gpu:
            self.model.cuda()
        else:
            self.model.cpu()
        self.model.eval()

    def softmax(self, vector):
        """Softmax function for calculating probs"""
        exps = np.exp(vector)
        return exps / exps.sum()

    def predict(self, url):
        """Generating prediction of image url"""
        # fetch the raw image bytes and decode them
        raw = requests.get(url).content
        img = Image.open(BytesIO(raw))
        pipeline = transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize((128, 128)),
            transforms.ToTensor(),
        ])
        batch = pipeline(img).unsqueeze(0)
        if torch.cuda.is_available():
            batch = batch.cuda()
        out = self.model(batch)
        classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',
                   'Stratocaster', 'Telecaster']
        if torch.cuda.is_available():
            logs = out.cpu().data.numpy()
        else:
            logs = out.data.numpy()
        return [classes[logs.argmax()]]
|
flexible
|
{
"blob_id": "8743be809953f59bd14431e509042c4c51d9fab4",
"index": 4175,
"step-1": "<mask token>\n\n\nclass predict_guitar:\n <mask token>\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass predict_guitar:\n <mask token>\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-3": "<mask token>\n\n\nclass predict_guitar:\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n self.model = Net()\n if torch.cuda.is_available():\n map_location = torch.device('cuda')\n else:\n map_location = torch.device('cpu')\n self.model.load_state_dict(torch.load('model.pt', map_location=\n map_location))\n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-4": "import torch\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nfrom net import Net\n\n\nclass predict_guitar:\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n self.model = Net()\n if torch.cuda.is_available():\n map_location = torch.device('cuda')\n else:\n map_location = torch.device('cpu')\n self.model.load_state_dict(torch.load('model.pt', map_location=\n map_location))\n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-5": "import torch\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nfrom net import Net\n\nclass predict_guitar():\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self,url):\n \"\"\"Generating prediction of image url\"\"\"\n\n # get image\n response = requests.get(url)\n \n img = Image.open(BytesIO(response.content))\n\n transform = transforms.Compose([transforms.Grayscale(),\n transforms.Resize((128,128)),\n transforms.ToTensor()])\n\n img = transform(img).unsqueeze(0)\n\n if torch.cuda.is_available(): \n img = img.cuda() \n\n out = self.model(img)\n\n classes = ['Jazzmaster','Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster','Telecaster']\n\n if torch.cuda.is_available():\n\n logs = out.cpu().data.numpy()\n \n else:\n\n logs = out.data.numpy()\n \n return [classes[logs.argmax()]]\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django import forms
from crawlr.models import Route, Category, UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
    """Form for creating a Category; only ``name`` is user-editable."""
    name = forms.CharField(max_length=128,
                           help_text = "Please enter the category name.")
    # Counters are maintained by the views, not by user input, hence
    # hidden widgets with 0 defaults.
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    # Slug is hidden and optional; presumably generated in Category.save()
    # — TODO confirm against the model.
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)

    class Meta:
        model = Category
        fields = ('name',)
class RouteForm(forms.ModelForm):
    """Form for creating a crawl Route; enforces a unique title."""
    error_messages = {'duplicate_title':'Please enter a unique name for the crawl'}
    title = forms.CharField(max_length=128,
        help_text = "Please enter the name of the Crawl")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    #Hidden inputs for the variables retrieved from find directions page
    start = forms.CharField(widget=forms.HiddenInput())
    end = forms.CharField(widget=forms.HiddenInput())
    waypts = forms.CharField(widget=forms.HiddenInput())
    #Location choice, a drop down menu selection
    category = forms.ModelChoiceField(queryset=Category.objects.all())
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)
    # Owner is set by the view and hidden from the user.
    created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())

    class Meta:
        model = Route
        fields = ('category', 'title', 'slug', 'start', 'end', 'waypts', 'created_by')

    def clean_title(self):
        """Reject titles that already exist.

        If ``Route.objects.get`` succeeds, the title is a duplicate and a
        ValidationError is raised (it propagates: it is not caught by the
        ``Route.DoesNotExist`` handler below). Only when no Route with
        this title exists is the title returned as valid.
        """
        title = self.cleaned_data["title"]
        try:
            Route.objects.get(title=title)
            raise forms.ValidationError(
                self.error_messages['duplicate_title'], # customized error message
                code='duplicate_title',
            )
        except Route.DoesNotExist:
            return title
class UserForm(forms.ModelForm):
    """Registration form; placeholder text replaces visible labels
    (``label=''``)."""
    # NOTE(review): the view must hash this with set_password() before
    # saving — a ModelForm save would store it as plain text; confirm.
    password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder' : 'Password'}), label='')
    username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')
    email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email'}), label='', required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Companion form for the extra profile data (avatar picture only)."""
    class Meta:
        model = UserProfile
        fields = ('picture',)
|
normal
|
{
"blob_id": "abf25cf3d4435754b916fa06e5e887b1e3589a1c",
"index": 5073,
"step-1": "<mask token>\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-2": "<mask token>\n\n\nclass CategoryForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-3": "<mask token>\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128, help_text=\n 'Please enter the category name.')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-4": "from django import forms\nfrom crawlr.models import Route, Category, UserProfile\nfrom django.contrib.auth.models import User\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128, help_text=\n 'Please enter the category name.')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 
'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-5": "from django import forms\nfrom crawlr.models import Route, Category, UserProfile\nfrom django.contrib.auth.models import User\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128,\n help_text = \"Please enter the category name.\")\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n class Meta:\n model = Category\n fields = ('name',)\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128,\n help_text = \"Please enter the name of the Crawl\")\n\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n #Hidden inputs for the variables retrieved from find directions page\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n #Location choice, a drop down menu selection\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts', 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data[\"title\"]\n try:\n Route.objects.get(title=title)\n\n raise forms.ValidationError(\n self.error_messages['duplicate_title'], # customized error message\n code='duplicate_title',\n )\n except Route.DoesNotExist:\n return title\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder' : 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')\n email = 
forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email'}), label='', required=False)\n\n class Meta:\n model = User\n fields = ('username', 'email', 'password')\n\nclass UserProfileForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('picture',)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
class Solution:
    def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:
        """For each query q, count the words w with f(q) < f(w), where
        f(s) is the number of occurrences of the lexicographically
        smallest character of s.

        Improvement over the original: instead of scanning every word per
        query (O(Q*W)), the word frequencies are sorted once and each
        query binary-searches them — O(W log W + Q log W) comparisons
        after the O(total chars) frequency pass. Results are identical.
        """
        from bisect import bisect_right  # stdlib; kept local to the method

        # f(w) for every word (duplicates kept), sorted for binary search.
        word_freqs = sorted(w.count(min(w)) for w in words)
        n = len(word_freqs)

        memo = {}  # query string -> answer, so duplicate queries cost O(1)
        ans = []
        for q in queries:
            if q not in memo:
                qf = q.count(min(q))
                # Frequencies strictly greater than qf lie to the right of
                # bisect_right's insertion point.
                memo[q] = n - bisect_right(word_freqs, qf)
            ans.append(memo[q])
        return ans
|
normal
|
{
"blob_id": "e9918f4fac2e13b36d9b20ffc28dc6508aad6f9b",
"index": 2159,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def numSmallerByFrequency(self, queries: List[str], words: List[str]\n ) ->List[int]:\n words_freq = {word: word.count(min(word)) for word in words}\n queries_freq = {}\n ans = []\n for query in queries:\n if query in queries_freq:\n ans.append(queries_freq[query])\n continue\n query_freq = query.count(min(query))\n num = sum([(1 if query_freq < words_freq[word] else 0) for word in\n words])\n ans.append(num)\n queries_freq[query] = num\n return ans\n",
"step-4": "class Solution:\n # complexity: 2*n^2 + 4*n^2 -> 8*n^2\n def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:\n # complexity: n*2*l where l is the length of the word -> 2*n^2\n words_freq = {\n word: word.count(min(word)) for word in words\n }\n\n queries_freq = {}\n\n ans = []\n\n # complexity: q*4*n where q is the length of queries -> 4n^2\n for query in queries:\n if query in queries_freq:\n ans.append(queries_freq[query])\n continue\n\n # complexity: 2*l where l is the length of the word -> 2*n\n query_freq = query.count(min(query))\n # complexity: n*n due the iteration and the sum -> 2*n\n num = sum([1 if query_freq < words_freq[word]\n else 0 for word in words])\n ans.append(num)\n queries_freq[query] = num\n\n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import helper  # project-local module providing greeting()

__author__ = 'AdrianLeo'

# Runs at import/execution time: emits a greeting via the helper module
# (presumably prints it — confirm against helper.greeting).
helper.greeting("Hey, dummy")
|
normal
|
{
"blob_id": "03156992355a756b2ae38735a98251eb611d4245",
"index": 2611,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhelper.greeting('Hey, dummy')\n",
"step-3": "<mask token>\n__author__ = 'AdrianLeo'\nhelper.greeting('Hey, dummy')\n",
"step-4": "import helper\n__author__ = 'AdrianLeo'\nhelper.greeting('Hey, dummy')\n",
"step-5": "import helper\n\n__author__ = 'AdrianLeo'\n\nhelper.greeting(\"Hey, dummy\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Build a reverse lookup from numeric winEvent / IA2 event IDs to their
# symbolic constant names (e.g. for readable event logging) by scanning
# this module's namespace for EVENT_* / IA2_EVENT_* symbols.
# NOTE(review): assumes winEventIDsToEventNames is a dict defined earlier
# in the module — confirm.
for _sym, _val in locals().items():
    if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):
        winEventIDsToEventNames[_val] = _sym
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CHILDID_SELF = 0
IA2_TEXT_OFFSET_LENGTH = -1
IA2_TEXT_OFFSET_CARET = -2
ROLE_SYSTEM_ALERT = 8
ROLE_SYSTEM_ANIMATION = 54
ROLE_SYSTEM_APPLICATION = 14
ROLE_SYSTEM_BORDER = 19
ROLE_SYSTEM_BUTTONDROPDOWN = 56
ROLE_SYSTEM_BUTTONDROPDOWNGRID = 58
ROLE_SYSTEM_BUTTONMENU = 57
ROLE_SYSTEM_CARET = 7
ROLE_SYSTEM_CELL = 29
ROLE_SYSTEM_CHARACTER = 32
ROLE_SYSTEM_CHART = 17
ROLE_SYSTEM_CHECKBUTTON = 44
ROLE_SYSTEM_CLIENT = 10
ROLE_SYSTEM_CLOCK = 61
ROLE_SYSTEM_COLUMN = 27
ROLE_SYSTEM_COLUMNHEADER = 25
ROLE_SYSTEM_COMBOBOX = 46
ROLE_SYSTEM_CURSOR = 6
ROLE_SYSTEM_DIAGRAM = 53
ROLE_SYSTEM_DIAL = 49
ROLE_SYSTEM_DIALOG = 18
ROLE_SYSTEM_DOCUMENT = 15
ROLE_SYSTEM_DROPLIST = 47
ROLE_SYSTEM_EQUATION = 55
ROLE_SYSTEM_GRAPHIC = 40
ROLE_SYSTEM_GRIP = 4
ROLE_SYSTEM_GROUPING = 20
ROLE_SYSTEM_HELPBALLOON = 31
ROLE_SYSTEM_HOTKEYFIELD = 50
ROLE_SYSTEM_INDICATOR = 39
ROLE_SYSTEM_LINK = 30
ROLE_SYSTEM_LIST = 33
ROLE_SYSTEM_LISTITEM = 34
ROLE_SYSTEM_MENUBAR = 2
ROLE_SYSTEM_MENUITEM = 12
ROLE_SYSTEM_MENUPOPUP = 11
ROLE_SYSTEM_OUTLINE = 35
ROLE_SYSTEM_OUTLINEITEM = 36
ROLE_SYSTEM_PAGETAB = 37
ROLE_SYSTEM_PAGETABLIST = 60
ROLE_SYSTEM_PANE = 16
ROLE_SYSTEM_PROGRESSBAR = 48
ROLE_SYSTEM_PROPERTYPAGE = 38
ROLE_SYSTEM_PUSHBUTTON = 43
ROLE_SYSTEM_RADIOBUTTON = 45
ROLE_SYSTEM_ROW = 28
ROLE_SYSTEM_ROWHEADER = 26
ROLE_SYSTEM_SCROLLBAR = 3
ROLE_SYSTEM_SEPARATOR = 21
ROLE_SYSTEM_SLIDER = 51
ROLE_SYSTEM_SOUND = 5
ROLE_SYSTEM_SPINBUTTON = 52
ROLE_SYSTEM_STATICTEXT = 41
ROLE_SYSTEM_STATUSBAR = 23
ROLE_SYSTEM_TABLE = 24
ROLE_SYSTEM_TEXT = 42
ROLE_SYSTEM_TITLEBAR = 1
ROLE_SYSTEM_TOOLBAR = 22
ROLE_SYSTEM_TOOLTIP = 13
ROLE_SYSTEM_WHITESPACE = 59
ROLE_SYSTEM_WINDOW = 9
IA2_ROLE_UNKNOWN = 0
IA2_ROLE_CANVAS = 1025
IA2_ROLE_CAPTION = 1026
IA2_ROLE_CHECK_MENU_ITEM = 1027
IA2_ROLE_COLOR_CHOOSER = 1028
IA2_ROLE_DATE_EDITOR = 1029
IA2_ROLE_DESKTOP_ICON = 1030
IA2_ROLE_DESKTOP_PANE = 1031
IA2_ROLE_DIRECTORY_PANE = 1032
IA2_ROLE_EDITBAR = 1033
IA2_ROLE_EMBEDDED_OBJECT = 1034
IA2_ROLE_ENDNOTE = 1035
IA2_ROLE_FILE_CHOOSER = 1036
IA2_ROLE_FONT_CHOOSER = 1037
IA2_ROLE_FOOTER = 1038
IA2_ROLE_FOOTNOTE = 1039
IA2_ROLE_FORM = 1040
IA2_ROLE_FRAME = 1041
IA2_ROLE_GLASS_PANE = 1042
IA2_ROLE_HEADER = 1043
IA2_ROLE_HEADING = 1044
IA2_ROLE_ICON = 1045
IA2_ROLE_IMAGE_MAP = 1046
IA2_ROLE_INPUT_METHOD_WINDOW = 1047
IA2_ROLE_INTERNAL_FRAME = 1048
IA2_ROLE_LABEL = 1049
IA2_ROLE_LAYERED_PANE = 1050
IA2_ROLE_NOTE = 1051
IA2_ROLE_OPTION_PANE = 1052
IA2_ROLE_PAGE = 1053
IA2_ROLE_PARAGRAPH = 1054
IA2_ROLE_RADIO_MENU_ITEM = 1055
IA2_ROLE_REDUNDANT_OBJECT = 1056
IA2_ROLE_ROOT_PANE = 1057
IA2_ROLE_RULER = 1058
IA2_ROLE_SCROLL_PANE = 1059
IA2_ROLE_SECTION = 1060
IA2_ROLE_SHAPE = 1061
IA2_ROLE_SPLIT_PANE = 1062
IA2_ROLE_TEAR_OFF_MENU = 1063
IA2_ROLE_TERMINAL = 1064
IA2_ROLE_TEXT_FRAME = 1065
IA2_ROLE_TOGGLE_BUTTON = 1066
IA2_ROLE_VIEW_PORT = 1067
IA2_ROLE_COMPLEMENTARY_CONTENT = 1068
IA2_ROLE_LANDMARK = 1069
UNLOCALIZED_ROLE_NAMES = {(1): u'ROLE_SYSTEM_TITLEBAR', (2):
u'ROLE_SYSTEM_MENUBAR', (3): u'ROLE_SYSTEM_SCROLLBAR', (4):
u'ROLE_SYSTEM_GRIP', (5): u'ROLE_SYSTEM_SOUND', (6):
u'ROLE_SYSTEM_CURSOR', (7): u'ROLE_SYSTEM_CARET', (8):
u'ROLE_SYSTEM_ALERT', (9): u'ROLE_SYSTEM_WINDOW', (10):
u'ROLE_SYSTEM_CLIENT', (11): u'ROLE_SYSTEM_MENUPOPUP', (12):
u'ROLE_SYSTEM_MENUITEM', (13): u'ROLE_SYSTEM_TOOLTIP', (14):
u'ROLE_SYSTEM_APPLICATION', (15): u'ROLE_SYSTEM_DOCUMENT', (16):
u'ROLE_SYSTEM_PANE', (17): u'ROLE_SYSTEM_CHART', (18):
u'ROLE_SYSTEM_DIALOG', (19): u'ROLE_SYSTEM_BORDER', (20):
u'ROLE_SYSTEM_GROUPING', (21): u'ROLE_SYSTEM_SEPARATOR', (22):
u'ROLE_SYSTEM_TOOLBAR', (23): u'ROLE_SYSTEM_STATUSBAR', (24):
u'ROLE_SYSTEM_TABLE', (25): u'ROLE_SYSTEM_COLUMNHEADER', (26):
u'ROLE_SYSTEM_ROWHEADER', (27): u'ROLE_SYSTEM_COLUMN', (28):
u'ROLE_SYSTEM_ROW', (29): u'ROLE_SYSTEM_CELL', (30):
u'ROLE_SYSTEM_LINK', (31): u'ROLE_SYSTEM_HELPBALLOON', (32):
u'ROLE_SYSTEM_CHARACTER', (33): u'ROLE_SYSTEM_LIST', (34):
u'ROLE_SYSTEM_LISTITEM', (35): u'ROLE_SYSTEM_OUTLINE', (36):
u'ROLE_SYSTEM_OUTLINEITEM', (37): u'ROLE_SYSTEM_PAGETAB', (38):
u'ROLE_SYSTEM_PROPERTYPAGE', (39): u'ROLE_SYSTEM_INDICATOR', (40):
u'ROLE_SYSTEM_GRAPHIC', (41): u'ROLE_SYSTEM_STATICTEXT', (42):
u'ROLE_SYSTEM_TEXT', (43): u'ROLE_SYSTEM_PUSHBUTTON', (44):
u'ROLE_SYSTEM_CHECKBUTTON', (45): u'ROLE_SYSTEM_RADIOBUTTON', (46):
u'ROLE_SYSTEM_COMBOBOX', (47): u'ROLE_SYSTEM_DROPLIST', (48):
u'ROLE_SYSTEM_PROGRESSBAR', (49): u'ROLE_SYSTEM_DIAL', (50):
u'ROLE_SYSTEM_HOTKEYFIELD', (51): u'ROLE_SYSTEM_SLIDER', (52):
u'ROLE_SYSTEM_SPINBUTTON', (53): u'ROLE_SYSTEM_DIAGRAM', (54):
u'ROLE_SYSTEM_ANIMATION', (55): u'ROLE_SYSTEM_EQUATION', (56):
u'ROLE_SYSTEM_BUTTONDROPDOWN', (57): u'ROLE_SYSTEM_BUTTONMENU', (58):
u'ROLE_SYSTEM_BUTTONDROPDOWNGRID', (59): u'ROLE_SYSTEM_WHITESPACE', (60
): u'ROLE_SYSTEM_PAGETABLIST', (61): u'ROLE_SYSTEM_CLOCK'}
UNLOCALIZED_IA2_ROLE_NAMES = {(0): u'IA2_ROLE_UNKNOWN', (1025):
u'IA2_ROLE_CANVAS', (1026): u'IA2_ROLE_CAPTION', (1027):
u'IA2_ROLE_CHECK_MENU_ITEM', (1028): u'IA2_ROLE_COLOR_CHOOSER', (1029):
u'IA2_ROLE_DATE_EDITOR', (1030): u'IA2_ROLE_DESKTOP_ICON', (1031):
u'IA2_ROLE_DESKTOP_PANE', (1032): u'IA2_ROLE_DIRECTORY_PANE', (1033):
u'IA2_ROLE_EDITBAR', (1034): u'IA2_ROLE_EMBEDDED_OBJECT', (1035):
u'IA2_ROLE_ENDNOTE', (1036): u'IA2_ROLE_FILE_CHOOSER', (1037):
u'IA2_ROLE_FONT_CHOOSER', (1038): u'IA2_ROLE_FOOTER', (1039):
u'IA2_ROLE_FOOTNOTE', (1040): u'IA2_ROLE_FORM', (1041):
u'IA2_ROLE_FRAME', (1042): u'IA2_ROLE_GLASS_PANE', (1043):
u'IA2_ROLE_HEADER', (1044): u'IA2_ROLE_HEADING', (1045):
u'IA2_ROLE_ICON', (1046): u'IA2_ROLE_IMAGE_MAP', (1047):
u'IA2_ROLE_INPUT_METHOD_WINDOW', (1048): u'IA2_ROLE_INTERNAL_FRAME', (
1049): u'IA2_ROLE_LABEL', (1050): u'IA2_ROLE_LAYERED_PANE', (1051):
u'IA2_ROLE_NOTE', (1052): u'IA2_ROLE_OPTION_PANE', (1053):
u'IA2_ROLE_PAGE', (1054): u'IA2_ROLE_PARAGRAPH', (1055):
u'IA2_ROLE_RADIO_MENU_ITEM', (1056): u'IA2_ROLE_REDUNDANT_OBJECT', (
1057): u'IA2_ROLE_ROOT_PANE', (1058): u'IA2_ROLE_RULER', (1059):
u'IA2_ROLE_SCROLL_PANE', (1060): u'IA2_ROLE_SECTION', (1061):
u'IA2_ROLE_SHAPE', (1062): u'IA2_ROLE_SPLIT_PANE', (1063):
u'IA2_ROLE_TEAR_OFF_MENU', (1064): u'IA2_ROLE_TERMINAL', (1065):
u'IA2_ROLE_TEXT_FRAME', (1066): u'IA2_ROLE_TOGGLE_BUTTON', (1067):
u'IA2_ROLE_VIEW_PORT', (1068): u'IA2_ROLE_COMPLEMENTARY_CONTENT', (1069
): u'IA2_ROLE_LANDMARK'}
NAVDIR_DOWN = 2
NAVDIR_FIRSTCHILD = 7
NAVDIR_LASTCHILD = 8
NAVDIR_LEFT = 3
NAVDIR_NEXT = 5
NAVDIR_PREVIOUS = 6
NAVDIR_RIGHT = 4
NAVDIR_UP = 1
STATE_SYSTEM_UNAVAILABLE = 1
STATE_SYSTEM_SELECTED = 2
STATE_SYSTEM_FOCUSED = 4
STATE_SYSTEM_PRESSED = 8
STATE_SYSTEM_CHECKED = 16
STATE_SYSTEM_MIXED = 32
STATE_SYSTEM_READONLY = 64
STATE_SYSTEM_HOTTRACKED = 128
STATE_SYSTEM_DEFAULT = 256
STATE_SYSTEM_EXPANDED = 512
STATE_SYSTEM_COLLAPSED = 1024
STATE_SYSTEM_BUSY = 2048
STATE_SYSTEM_FLOATING = 4096
STATE_SYSTEM_MARQUEED = 8192
STATE_SYSTEM_ANIMATED = 16384
STATE_SYSTEM_INVISIBLE = 32768
STATE_SYSTEM_OFFSCREEN = 65536
STATE_SYSTEM_SIZEABLE = 131072
STATE_SYSTEM_MOVEABLE = 262144
STATE_SYSTEM_SELFVOICING = 524288
STATE_SYSTEM_FOCUSABLE = 1048576
STATE_SYSTEM_SELECTABLE = 2097152
STATE_SYSTEM_LINKED = 4194304
STATE_SYSTEM_TRAVERSED = 8388608
STATE_SYSTEM_MULTISELECTABLE = 16777216
STATE_SYSTEM_EXTSELECTABLE = 33554432
STATE_SYSTEM_HASSUBMENU = 67108864
STATE_SYSTEM_ALERT_LOW = 67108864
STATE_SYSTEM_ALERT_MEDIUM = 134217728
STATE_SYSTEM_ALERT_HIGH = 268435456
STATE_SYSTEM_PROTECTED = 536870912
STATE_SYSTEM_HASPOPUP = 1073741824
STATE_SYSTEM_VALID = 536870911
UNLOCALIZED_STATE_NAMES = {(1): u'STATE_SYSTEM_UNAVAILABLE', (2):
u'STATE_SYSTEM_SELECTED', (4): u'STATE_SYSTEM_FOCUSED', (8):
u'STATE_SYSTEM_PRESSED', (16): u'STATE_SYSTEM_CHECKED', (32):
u'STATE_SYSTEM_MIXED', (64): u'STATE_SYSTEM_READONLY', (128):
u'STATE_SYSTEM_HOTTRACKED', (256): u'STATE_SYSTEM_DEFAULT', (512):
u'STATE_SYSTEM_EXPANDED', (1024): u'STATE_SYSTEM_COLLAPSED', (2048):
u'STATE_SYSTEM_BUSY', (4096): u'STATE_SYSTEM_FLOATING', (8192):
u'STATE_SYSTEM_MARQUEED', (16384): u'STATE_SYSTEM_ANIMATED', (32768):
u'STATE_SYSTEM_INVISIBLE', (65536): u'STATE_SYSTEM_OFFSCREEN', (131072):
u'STATE_SYSTEM_SIZEABLE', (262144): u'STATE_SYSTEM_MOVEABLE', (524288):
u'STATE_SYSTEM_SELFVOICING', (1048576): u'STATE_SYSTEM_FOCUSABLE', (
2097152): u'STATE_SYSTEM_SELECTABLE', (4194304): u'STATE_SYSTEM_LINKED',
(8388608): u'STATE_SYSTEM_TRAVERSED', (16777216):
u'STATE_SYSTEM_MULTISELECTABLE', (33554432):
u'STATE_SYSTEM_EXTSELECTABLE', (67108864): u'STATE_SYSTEM_ALERT_LOW', (
134217728): u'STATE_SYSTEM_ALERT_MEDIUM', (268435456):
u'STATE_SYSTEM_ALERT_HIGH', (536870912): u'STATE_SYSTEM_PROTECTED', (
1073741824): u'STATE_SYSTEM_HASPOPUP', (536870911): u'STATE_SYSTEM_VALID'}
IA2_STATE_ACTIVE = 1
IA2_STATE_ARMED = 2
IA2_STATE_DEFUNCT = 4
IA2_STATE_EDITABLE = 8
IA2_STATE_HORIZONTAL = 16
IA2_STATE_ICONIFIED = 32
IA2_STATE_INVALID_ENTRY = 64
IA2_STATE_MANAGES_DESCENDANTS = 128
IA2_STATE_MODAL = 256
IA2_STATE_MULTI_LINE = 512
IA2_STATE_OPAQUE = 1024
IA2_STATE_REQUIRED = 2048
IA2_STATE_SELECTABLE_TEXT = 4096
IA2_STATE_SINGLE_LINE = 8192
IA2_STATE_STALE = 16384
IA2_STATE_SUPPORTS_AUTOCOMPLETION = 32768
IA2_STATE_TRANSIENT = 65536
IA2_STATE_VERTICAL = 131072
IA2_STATE_CHECKABLE = 262144
IA2_STATE_PINNED = 524288
UNLOCALIZED_IA2_STATE_NAMES = {(1): u'IA2_STATE_ACTIVE', (2):
u'IA2_STATE_ARMED', (4): u'IA2_STATE_DEFUNCT', (8):
u'IA2_STATE_EDITABLE', (16): u'IA2_STATE_HORIZONTAL', (32):
u'IA2_STATE_ICONIFIED', (64): u'IA2_STATE_INVALID_ENTRY', (128):
u'IA2_STATE_MANAGES_DESCENDANTS', (256): u'IA2_STATE_MODAL', (512):
u'IA2_STATE_MULTI_LINE', (1024): u'IA2_STATE_OPAQUE', (2048):
u'IA2_STATE_REQUIRED', (4096): u'IA2_STATE_SELECTABLE_TEXT', (8192):
u'IA2_STATE_SINGLE_LINE', (16384): u'IA2_STATE_STALE', (32768):
u'IA2_STATE_SUPPORTS_AUTOCOMPLETION', (65536): u'IA2_STATE_TRANSIENT',
(131072): u'IA2_STATE_VERTICAL', (262144): u'IA2_STATE_CHECKABLE', (
524288): u'IA2_STATE_PINNED'}
UNLOCALIZED_IA2_RELATION_TYPES = {u'containingApplication':
u'IA2_RELATION_CONTAINING_APPLICATION', u'containingDocument':
u'IA2_RELATION_CONTAINING_DOCUMENT', u'containingTabPane':
u'IA2_RELATION_CONTAINING_TAB_PANE', u'containingWindow':
u'IA2_RELATION_CONTAINING_WINDOW', u'controlledBy':
u'IA2_RELATION_CONTROLLED_BY', u'controllerFor':
u'IA2_RELATION_CONTROLLER_FOR', u'describedBy':
u'IA2_RELATION_DESCRIBED_BY', u'descriptionFor':
u'IA2_RELATION_DESCRIPTION_FOR', u'details': u'IA2_RELATION_DETAILS',
u'detailsFor': u'IA2_RELATION_DETAILS_FOR', u'embeddedBy':
u'IA2_RELATION_EMBEDDED_BY', u'embeds': u'IA2_RELATION_EMBEDS',
u'errorMessage': u'IA2_RELATION_ERROR_MESSAGE', u'errorFor':
u'IA2_RELATION_ERROR_FOR', u'flowsFrom': u'IA2_RELATION_FLOWS_FROM',
u'flowsTo': u'IA2_RELATION_FLOWS_TO', u'labelFor':
u'IA2_RELATION_LABEL_FOR', u'labelledBy': u'IA2_RELATION_LABELED_BY',
u'labelledBy': u'IA2_RELATION_LABELLED_BY', u'memberOf':
u'IA2_RELATION_MEMBER_OF', u'nextTabbable':
u'IA2_RELATION_NEXT_TABBABLE', u'nodeChildOf':
u'IA2_RELATION_NODE_CHILD_OF', u'nodeParentOf':
u'IA2_RELATION_NODE_PARENT_OF', u'parentWindowOf':
u'IA2_RELATION_PARENT_WINDOW_OF', u'popupFor':
u'IA2_RELATION_POPUP_FOR', u'previousTabbable':
u'IA2_RELATION_PREVIOUS_TABBABLE', u'subwindowOf':
u'IA2_RELATION_SUBWINDOW_OF'}
WINEVENT_OUTOFCONTEXT = 0
WINEVENT_SKIPOWNTHREAD = 1
WINEVENT_SKIPOWNPROCESS = 2
WINEVENT_INCONTEXT = 4
EVENT_SYSTEM_SOUND = 1
EVENT_SYSTEM_ALERT = 2
EVENT_SYSTEM_FOREGROUND = 3
EVENT_SYSTEM_MENUSTART = 4
EVENT_SYSTEM_MENUEND = 5
EVENT_SYSTEM_MENUPOPUPSTART = 6
EVENT_SYSTEM_MENUPOPUPEND = 7
EVENT_SYSTEM_CAPTURESTART = 8
EVENT_SYSTEM_CAPTUREEND = 9
EVENT_SYSTEM_MOVESIZESTART = 10
EVENT_SYSTEM_MOVESIZEEND = 11
EVENT_SYSTEM_CONTEXTHELPSTART = 12
EVENT_SYSTEM_CONTEXTHELPEND = 13
EVENT_SYSTEM_DRAGDROPSTART = 14
EVENT_SYSTEM_DRAGDROPEND = 15
EVENT_SYSTEM_DIALOGSTART = 16
EVENT_SYSTEM_DIALOGEND = 17
EVENT_SYSTEM_SCROLLINGSTART = 18
EVENT_SYSTEM_SCROLLINGEND = 19
EVENT_SYSTEM_SWITCHSTART = 20
EVENT_SYSTEM_SWITCHEND = 21
EVENT_SYSTEM_MINIMIZESTART = 22
EVENT_SYSTEM_MINIMIZEEND = 23
EVENT_OBJECT_CREATE = 32768
EVENT_OBJECT_DESTROY = 32769
EVENT_OBJECT_SHOW = 32770
EVENT_OBJECT_HIDE = 32771
EVENT_OBJECT_REORDER = 32772
EVENT_OBJECT_FOCUS = 32773
EVENT_OBJECT_SELECTION = 32774
EVENT_OBJECT_SELECTIONADD = 32775
EVENT_OBJECT_SELECTIONREMOVE = 32776
EVENT_OBJECT_SELECTIONWITHIN = 32777
EVENT_OBJECT_STATECHANGE = 32778
EVENT_OBJECT_LOCATIONCHANGE = 32779
EVENT_OBJECT_NAMECHANGE = 32780
EVENT_OBJECT_DESCRIPTIONCHANGE = 32781
EVENT_OBJECT_VALUECHANGE = 32782
EVENT_OBJECT_PARENTCHANGE = 32783
EVENT_OBJECT_HELPCHANGE = 32784
EVENT_OBJECT_DEFACTIONCHANGE = 32785
EVENT_OBJECT_ACCELERATORCHANGE = 32786
EVENT_CONSOLE_CARET = 16385
EVENT_CONSOLE_UPDATE_REGION = 16386
EVENT_CONSOLE_UPDATE_SIMPLE = 16387
EVENT_CONSOLE_UPDATE_SCROLL = 16388
EVENT_CONSOLE_LAYOUT = 16389
EVENT_CONSOLE_START_APPLICATION = 16390
EVENT_CONSOLE_END_APPLICATION = 16391
IA2_EVENT_ACTION_CHANGED = 257
IA2_EVENT_ACTIVE_DECENDENT_CHANGED = 258
IA2_EVENT_ACTIVE_DESCENDANT_CHANGED = 258
IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED = 259
IA2_EVENT_DOCUMENT_CONTENT_CHANGED = 260
IA2_EVENT_DOCUMENT_LOAD_COMPLETE = 261
IA2_EVENT_DOCUMENT_LOAD_STOPPED = 262
IA2_EVENT_DOCUMENT_RELOAD = 263
IA2_EVENT_HYPERLINK_END_INDEX_CHANGED = 264
IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED = 265
IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED = 266
IA2_EVENT_HYPERTEXT_LINK_ACTIVATED = 267
IA2_EVENT_HYPERTEXT_LINK_SELECTED = 268
IA2_EVENT_HYPERLINK_START_INDEX_CHANGED = 269
IA2_EVENT_HYPERTEXT_CHANGED = 270
IA2_EVENT_HYPERTEXT_NLINKS_CHANGED = 287
IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED = 288
IA2_EVENT_PAGE_CHANGED = 273
IA2_EVENT_SECTION_CHANGED = 274
IA2_EVENT_TABLE_CAPTION_CHANGED = 275
IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED = 276
IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED = 277
IA2_EVENT_TABLE_MODEL_CHANGED = 278
IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED = 279
IA2_EVENT_TABLE_ROW_HEADER_CHANGED = 280
IA2_EVENT_TABLE_SUMMARY_CHANGED = 281
IA2_EVENT_TEXT_ATTRIBUTE_CHANGED = 282
IA2_EVENT_TEXT_CARET_MOVED = 283
IA2_EVENT_TEXT_CHANGED = 284
IA2_EVENT_TEXT_COLUMN_CHANGED = 285
IA2_EVENT_TEXT_INSERTED = 286
IA2_EVENT_TEXT_REMOVED = 287
IA2_EVENT_TEXT_UPDATED = 288
IA2_EVENT_TEXT_SELECTION_CHANGED = 289
IA2_EVENT_VISIBLE_DATA_CHANGED = 290
UNLOCALIZED_EVENT_NAMES = {(1): u'EVENT_SYSTEM_SOUND', (2):
u'EVENT_SYSTEM_ALERT', (3): u'EVENT_SYSTEM_FOREGROUND', (4):
u'EVENT_SYSTEM_MENUSTART', (5): u'EVENT_SYSTEM_MENUEND', (6):
u'EVENT_SYSTEM_MENUPOPUPSTART', (7): u'EVENT_SYSTEM_MENUPOPUPEND', (8):
u'EVENT_SYSTEM_CAPTURESTART', (9): u'EVENT_SYSTEM_CAPTUREEND', (10):
u'EVENT_SYSTEM_MOVESIZESTART', (11): u'EVENT_SYSTEM_MOVESIZEEND', (12):
u'EVENT_SYSTEM_CONTEXTHELPSTART', (13): u'EVENT_SYSTEM_CONTEXTHELPEND',
(14): u'EVENT_SYSTEM_DRAGDROPSTART', (15): u'EVENT_SYSTEM_DRAGDROPEND',
(16): u'EVENT_SYSTEM_DIALOGSTART', (17): u'EVENT_SYSTEM_DIALOGEND', (18
): u'EVENT_SYSTEM_SCROLLINGSTART', (19): u'EVENT_SYSTEM_SCROLLINGEND',
(20): u'EVENT_SYSTEM_SWITCHSTART', (21): u'EVENT_SYSTEM_SWITCHEND', (22
): u'EVENT_SYSTEM_MINIMIZESTART', (23): u'EVENT_SYSTEM_MINIMIZEEND', (
257): u'IA2_EVENT_ACTION_CHANGED', (258):
u'IA2_EVENT_ACTIVE_DESCENDANT_CHANGED', (259):
u'IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED', (260):
u'IA2_EVENT_DOCUMENT_CONTENT_CHANGED', (261):
u'IA2_EVENT_DOCUMENT_LOAD_COMPLETE', (262):
u'IA2_EVENT_DOCUMENT_LOAD_STOPPED', (263): u'IA2_EVENT_DOCUMENT_RELOAD',
(264): u'IA2_EVENT_HYPERLINK_END_INDEX_CHANGED', (265):
u'IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED', (266):
u'IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED', (267):
u'IA2_EVENT_HYPERTEXT_LINK_ACTIVATED', (268):
u'IA2_EVENT_HYPERTEXT_LINK_SELECTED', (269):
u'IA2_EVENT_HYPERLINK_START_INDEX_CHANGED', (270):
u'IA2_EVENT_HYPERTEXT_CHANGED', (271):
u'IA2_EVENT_HYPERTEXT_NLINKS_CHANGED', (272):
u'IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED', (273): u'IA2_EVENT_PAGE_CHANGED',
(274): u'IA2_EVENT_SECTION_CHANGED', (275):
u'IA2_EVENT_TABLE_CAPTION_CHANGED', (276):
u'IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED', (277):
u'IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED', (278):
u'IA2_EVENT_TABLE_MODEL_CHANGED', (279):
u'IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED', (280):
u'IA2_EVENT_TABLE_ROW_HEADER_CHANGED', (281):
u'IA2_EVENT_TABLE_SUMMARY_CHANGED', (282):
u'IA2_EVENT_TEXT_ATTRIBUTE_CHANGED', (283):
u'IA2_EVENT_TEXT_CARET_MOVED', (284): u'IA2_EVENT_TEXT_CHANGED', (285):
u'IA2_EVENT_TEXT_COLUMN_CHANGED', (286): u'IA2_EVENT_TEXT_INSERTED', (
287): u'IA2_EVENT_TEXT_REMOVED', (288): u'IA2_EVENT_TEXT_UPDATED', (289
): u'IA2_EVENT_TEXT_SELECTION_CHANGED', (290):
u'IA2_EVENT_VISIBLE_DATA_CHANGED', (16385): u'EVENT_CONSOLE_CARET', (
16386): u'EVENT_CONSOLE_UPDATE_REGION', (16387):
u'EVENT_CONSOLE_UPDATE_SIMPLE', (16388): u'EVENT_CONSOLE_UPDATE_SCROLL',
(16389): u'EVENT_CONSOLE_LAYOUT', (16390):
u'EVENT_CONSOLE_START_APPLICATION', (16391):
u'EVENT_CONSOLE_END_APPLICATION', (32768): u'EVENT_OBJECT_CREATE', (
32769): u'EVENT_OBJECT_DESTROY', (32770): u'EVENT_OBJECT_SHOW', (32771):
u'EVENT_OBJECT_HIDE', (32772): u'EVENT_OBJECT_REORDER', (32773):
u'EVENT_OBJECT_FOCUS', (32774): u'EVENT_OBJECT_SELECTION', (32775):
u'EVENT_OBJECT_SELECTIONADD', (32776): u'EVENT_OBJECT_SELECTIONREMOVE',
(32777): u'EVENT_OBJECT_SELECTIONWITHIN', (32778):
u'EVENT_OBJECT_STATECHANGE', (32779): u'EVENT_OBJECT_LOCATIONCHANGE', (
32780): u'EVENT_OBJECT_NAMECHANGE', (32781):
u'EVENT_OBJECT_DESCRIPTIONCHANGE', (32782): u'EVENT_OBJECT_VALUECHANGE',
(32783): u'EVENT_OBJECT_PARENTCHANGE', (32784):
u'EVENT_OBJECT_HELPCHANGE', (32785): u'EVENT_OBJECT_DEFACTIONCHANGE', (
32786): u'EVENT_OBJECT_ACCELERATORCHANGE'}
winEventIDsToEventNames = {}
for _sym, _val in locals().items():
if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):
winEventIDsToEventNames[_val] = _sym
<|reserved_special_token_1|>
'''
Useful constants.
Inspired by pyatspi:
http://live.gnome.org/GAP/PythonATSPI
@author: Eitan Isaacson
@copyright: Copyright (c) 2008, Eitan Isaacson
@license: LGPL
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
'''
# Child ID.
# MSAA child ID meaning "the object itself" rather than one of its children.
CHILDID_SELF = 0
# IAccessibleText Constants
# Sentinel offsets accepted by IAccessibleText methods (negative values are
# special, not real character offsets).
IA2_TEXT_OFFSET_LENGTH = -1
IA2_TEXT_OFFSET_CARET = -2
# Accessible Roles
# TODO: Is there a way to retrieve this at runtime or build time?
#
# MSAA ROLE_SYSTEM_* values (oleacc object role constants).
ROLE_SYSTEM_ALERT = 8
ROLE_SYSTEM_ANIMATION = 54
ROLE_SYSTEM_APPLICATION = 14
ROLE_SYSTEM_BORDER = 19
ROLE_SYSTEM_BUTTONDROPDOWN = 56
ROLE_SYSTEM_BUTTONDROPDOWNGRID = 58
ROLE_SYSTEM_BUTTONMENU = 57
ROLE_SYSTEM_CARET = 7
ROLE_SYSTEM_CELL = 29
ROLE_SYSTEM_CHARACTER = 32
ROLE_SYSTEM_CHART = 17
ROLE_SYSTEM_CHECKBUTTON = 44
ROLE_SYSTEM_CLIENT = 10
ROLE_SYSTEM_CLOCK = 61
ROLE_SYSTEM_COLUMN = 27
ROLE_SYSTEM_COLUMNHEADER = 25
ROLE_SYSTEM_COMBOBOX = 46
ROLE_SYSTEM_CURSOR = 6
ROLE_SYSTEM_DIAGRAM = 53
ROLE_SYSTEM_DIAL = 49
ROLE_SYSTEM_DIALOG = 18
ROLE_SYSTEM_DOCUMENT = 15
ROLE_SYSTEM_DROPLIST = 47
ROLE_SYSTEM_EQUATION = 55
ROLE_SYSTEM_GRAPHIC = 40
ROLE_SYSTEM_GRIP = 4
ROLE_SYSTEM_GROUPING = 20
ROLE_SYSTEM_HELPBALLOON = 31
ROLE_SYSTEM_HOTKEYFIELD = 50
ROLE_SYSTEM_INDICATOR = 39
ROLE_SYSTEM_LINK = 30
ROLE_SYSTEM_LIST = 33
ROLE_SYSTEM_LISTITEM = 34
ROLE_SYSTEM_MENUBAR = 2
ROLE_SYSTEM_MENUITEM = 12
ROLE_SYSTEM_MENUPOPUP = 11
ROLE_SYSTEM_OUTLINE = 35
ROLE_SYSTEM_OUTLINEITEM = 36
ROLE_SYSTEM_PAGETAB = 37
ROLE_SYSTEM_PAGETABLIST = 60
ROLE_SYSTEM_PANE = 16
ROLE_SYSTEM_PROGRESSBAR = 48
ROLE_SYSTEM_PROPERTYPAGE = 38
ROLE_SYSTEM_PUSHBUTTON = 43
ROLE_SYSTEM_RADIOBUTTON = 45
ROLE_SYSTEM_ROW = 28
ROLE_SYSTEM_ROWHEADER = 26
ROLE_SYSTEM_SCROLLBAR = 3
ROLE_SYSTEM_SEPARATOR = 21
ROLE_SYSTEM_SLIDER = 51
ROLE_SYSTEM_SOUND = 5
ROLE_SYSTEM_SPINBUTTON = 52
ROLE_SYSTEM_STATICTEXT = 41
ROLE_SYSTEM_STATUSBAR = 23
ROLE_SYSTEM_TABLE = 24
ROLE_SYSTEM_TEXT = 42
ROLE_SYSTEM_TITLEBAR = 1
ROLE_SYSTEM_TOOLBAR = 22
ROLE_SYSTEM_TOOLTIP = 13
ROLE_SYSTEM_WHITESPACE = 59
ROLE_SYSTEM_WINDOW = 9
# IAccessible2 roles (IA2 AccessibleRole enum; values start at 0x401,
# IA2_ROLE_UNKNOWN is 0).
IA2_ROLE_UNKNOWN = 0
IA2_ROLE_CANVAS = 0x401
IA2_ROLE_CAPTION = 0x402
IA2_ROLE_CHECK_MENU_ITEM = 0x403
IA2_ROLE_COLOR_CHOOSER = 0x404
IA2_ROLE_DATE_EDITOR = 0x405
IA2_ROLE_DESKTOP_ICON = 0x406
IA2_ROLE_DESKTOP_PANE = 0x407
IA2_ROLE_DIRECTORY_PANE = 0x408
IA2_ROLE_EDITBAR = 0x409
IA2_ROLE_EMBEDDED_OBJECT = 0x40a
IA2_ROLE_ENDNOTE = 0x40b
IA2_ROLE_FILE_CHOOSER = 0x40c
IA2_ROLE_FONT_CHOOSER = 0x40d
IA2_ROLE_FOOTER = 0x40e
IA2_ROLE_FOOTNOTE = 0x40f
IA2_ROLE_FORM = 0x410
IA2_ROLE_FRAME = 0x411
IA2_ROLE_GLASS_PANE = 0x412
IA2_ROLE_HEADER = 0x413
IA2_ROLE_HEADING = 0x414
IA2_ROLE_ICON = 0x415
IA2_ROLE_IMAGE_MAP = 0x416
IA2_ROLE_INPUT_METHOD_WINDOW = 0x417
IA2_ROLE_INTERNAL_FRAME = 0x418
IA2_ROLE_LABEL = 0x419
IA2_ROLE_LAYERED_PANE = 0x41a
IA2_ROLE_NOTE = 0x41b
IA2_ROLE_OPTION_PANE = 0x41c
IA2_ROLE_PAGE = 0x41d
IA2_ROLE_PARAGRAPH = 0x41e
IA2_ROLE_RADIO_MENU_ITEM = 0x41f
IA2_ROLE_REDUNDANT_OBJECT = 0x420
IA2_ROLE_ROOT_PANE = 0x421
IA2_ROLE_RULER = 0x422
IA2_ROLE_SCROLL_PANE = 0x423
IA2_ROLE_SECTION = 0x424
IA2_ROLE_SHAPE = 0x425
IA2_ROLE_SPLIT_PANE = 0x426
IA2_ROLE_TEAR_OFF_MENU = 0x427
IA2_ROLE_TERMINAL = 0x428
IA2_ROLE_TEXT_FRAME = 0x429
IA2_ROLE_TOGGLE_BUTTON = 0x42a
IA2_ROLE_VIEW_PORT = 0x42b
IA2_ROLE_COMPLEMENTARY_CONTENT = 0x42c
IA2_ROLE_LANDMARK = 0x42d
# Unlocalized role strings
# Maps MSAA ROLE_SYSTEM_* numeric values to their unlocalized constant names
# (useful for debugging/logging; not for user-facing display).
UNLOCALIZED_ROLE_NAMES = {
	1: u'ROLE_SYSTEM_TITLEBAR',
	2: u'ROLE_SYSTEM_MENUBAR',
	3: u'ROLE_SYSTEM_SCROLLBAR',
	4: u'ROLE_SYSTEM_GRIP',
	5: u'ROLE_SYSTEM_SOUND',
	6: u'ROLE_SYSTEM_CURSOR',
	7: u'ROLE_SYSTEM_CARET',
	8: u'ROLE_SYSTEM_ALERT',
	9: u'ROLE_SYSTEM_WINDOW',
	10: u'ROLE_SYSTEM_CLIENT',
	11: u'ROLE_SYSTEM_MENUPOPUP',
	12: u'ROLE_SYSTEM_MENUITEM',
	13: u'ROLE_SYSTEM_TOOLTIP',
	14: u'ROLE_SYSTEM_APPLICATION',
	15: u'ROLE_SYSTEM_DOCUMENT',
	16: u'ROLE_SYSTEM_PANE',
	17: u'ROLE_SYSTEM_CHART',
	18: u'ROLE_SYSTEM_DIALOG',
	19: u'ROLE_SYSTEM_BORDER',
	20: u'ROLE_SYSTEM_GROUPING',
	21: u'ROLE_SYSTEM_SEPARATOR',
	22: u'ROLE_SYSTEM_TOOLBAR',
	23: u'ROLE_SYSTEM_STATUSBAR',
	24: u'ROLE_SYSTEM_TABLE',
	25: u'ROLE_SYSTEM_COLUMNHEADER',
	26: u'ROLE_SYSTEM_ROWHEADER',
	27: u'ROLE_SYSTEM_COLUMN',
	28: u'ROLE_SYSTEM_ROW',
	29: u'ROLE_SYSTEM_CELL',
	30: u'ROLE_SYSTEM_LINK',
	31: u'ROLE_SYSTEM_HELPBALLOON',
	32: u'ROLE_SYSTEM_CHARACTER',
	33: u'ROLE_SYSTEM_LIST',
	34: u'ROLE_SYSTEM_LISTITEM',
	35: u'ROLE_SYSTEM_OUTLINE',
	36: u'ROLE_SYSTEM_OUTLINEITEM',
	37: u'ROLE_SYSTEM_PAGETAB',
	38: u'ROLE_SYSTEM_PROPERTYPAGE',
	39: u'ROLE_SYSTEM_INDICATOR',
	40: u'ROLE_SYSTEM_GRAPHIC',
	41: u'ROLE_SYSTEM_STATICTEXT',
	42: u'ROLE_SYSTEM_TEXT',
	43: u'ROLE_SYSTEM_PUSHBUTTON',
	44: u'ROLE_SYSTEM_CHECKBUTTON',
	45: u'ROLE_SYSTEM_RADIOBUTTON',
	46: u'ROLE_SYSTEM_COMBOBOX',
	47: u'ROLE_SYSTEM_DROPLIST',
	48: u'ROLE_SYSTEM_PROGRESSBAR',
	49: u'ROLE_SYSTEM_DIAL',
	50: u'ROLE_SYSTEM_HOTKEYFIELD',
	51: u'ROLE_SYSTEM_SLIDER',
	52: u'ROLE_SYSTEM_SPINBUTTON',
	53: u'ROLE_SYSTEM_DIAGRAM',
	54: u'ROLE_SYSTEM_ANIMATION',
	55: u'ROLE_SYSTEM_EQUATION',
	56: u'ROLE_SYSTEM_BUTTONDROPDOWN',
	57: u'ROLE_SYSTEM_BUTTONMENU',
	58: u'ROLE_SYSTEM_BUTTONDROPDOWNGRID',
	59: u'ROLE_SYSTEM_WHITESPACE',
	60: u'ROLE_SYSTEM_PAGETABLIST',
	61: u'ROLE_SYSTEM_CLOCK'}
# Unlocalized role strings
# Same idea for the IAccessible2 IA2_ROLE_* values.
UNLOCALIZED_IA2_ROLE_NAMES = {
	0x000: u'IA2_ROLE_UNKNOWN',
	0x401: u'IA2_ROLE_CANVAS',
	0x402: u'IA2_ROLE_CAPTION',
	0x403: u'IA2_ROLE_CHECK_MENU_ITEM',
	0x404: u'IA2_ROLE_COLOR_CHOOSER',
	0x405: u'IA2_ROLE_DATE_EDITOR',
	0x406: u'IA2_ROLE_DESKTOP_ICON',
	0x407: u'IA2_ROLE_DESKTOP_PANE',
	0x408: u'IA2_ROLE_DIRECTORY_PANE',
	0x409: u'IA2_ROLE_EDITBAR',
	0x40a: u'IA2_ROLE_EMBEDDED_OBJECT',
	0x40b: u'IA2_ROLE_ENDNOTE',
	0x40c: u'IA2_ROLE_FILE_CHOOSER',
	0x40d: u'IA2_ROLE_FONT_CHOOSER',
	0x40e: u'IA2_ROLE_FOOTER',
	0x40f: u'IA2_ROLE_FOOTNOTE',
	0x410: u'IA2_ROLE_FORM',
	0x411: u'IA2_ROLE_FRAME',
	0x412: u'IA2_ROLE_GLASS_PANE',
	0x413: u'IA2_ROLE_HEADER',
	0x414: u'IA2_ROLE_HEADING',
	0x415: u'IA2_ROLE_ICON',
	0x416: u'IA2_ROLE_IMAGE_MAP',
	0x417: u'IA2_ROLE_INPUT_METHOD_WINDOW',
	0x418: u'IA2_ROLE_INTERNAL_FRAME',
	0x419: u'IA2_ROLE_LABEL',
	0x41a: u'IA2_ROLE_LAYERED_PANE',
	0x41b: u'IA2_ROLE_NOTE',
	0x41c: u'IA2_ROLE_OPTION_PANE',
	0x41d: u'IA2_ROLE_PAGE',
	0x41e: u'IA2_ROLE_PARAGRAPH',
	0x41f: u'IA2_ROLE_RADIO_MENU_ITEM',
	0x420: u'IA2_ROLE_REDUNDANT_OBJECT',
	0x421: u'IA2_ROLE_ROOT_PANE',
	0x422: u'IA2_ROLE_RULER',
	0x423: u'IA2_ROLE_SCROLL_PANE',
	0x424: u'IA2_ROLE_SECTION',
	0x425: u'IA2_ROLE_SHAPE',
	0x426: u'IA2_ROLE_SPLIT_PANE',
	0x427: u'IA2_ROLE_TEAR_OFF_MENU',
	0x428: u'IA2_ROLE_TERMINAL',
	0x429: u'IA2_ROLE_TEXT_FRAME',
	0x42a: u'IA2_ROLE_TOGGLE_BUTTON',
	0x42b: u'IA2_ROLE_VIEW_PORT',
	0x42c: u'IA2_ROLE_COMPLEMENTARY_CONTENT',
	0x42d: u'IA2_ROLE_LANDMARK'}
# Navigation constants
# NAVDIR_* direction codes (as used with IAccessible spatial/logical
# navigation), written in hex and grouped by kind: spatial directions first,
# then logical order, then child traversal.
NAVDIR_UP = 0x1
NAVDIR_DOWN = 0x2
NAVDIR_LEFT = 0x3
NAVDIR_RIGHT = 0x4
NAVDIR_NEXT = 0x5
NAVDIR_PREVIOUS = 0x6
NAVDIR_FIRSTCHILD = 0x7
NAVDIR_LASTCHILD = 0x8
# MSAA object state bit flags. A state mask is a bitwise OR of these.
STATE_SYSTEM_UNAVAILABLE = 0x1
STATE_SYSTEM_SELECTED = 0x2
STATE_SYSTEM_FOCUSED = 0x4
STATE_SYSTEM_PRESSED = 0x8
STATE_SYSTEM_CHECKED = 0x10
STATE_SYSTEM_MIXED = 0x20
STATE_SYSTEM_READONLY = 0x40
STATE_SYSTEM_HOTTRACKED = 0x80
STATE_SYSTEM_DEFAULT = 0x100
STATE_SYSTEM_EXPANDED = 0x200
STATE_SYSTEM_COLLAPSED = 0x400
STATE_SYSTEM_BUSY = 0x800
STATE_SYSTEM_FLOATING = 0x1000
STATE_SYSTEM_MARQUEED = 0x2000
STATE_SYSTEM_ANIMATED = 0x4000
STATE_SYSTEM_INVISIBLE = 0x8000
STATE_SYSTEM_OFFSCREEN = 0x10000
STATE_SYSTEM_SIZEABLE = 0x20000
STATE_SYSTEM_MOVEABLE = 0x40000
STATE_SYSTEM_SELFVOICING = 0x80000
STATE_SYSTEM_FOCUSABLE = 0x100000
STATE_SYSTEM_SELECTABLE = 0x200000
STATE_SYSTEM_LINKED = 0x400000
STATE_SYSTEM_TRAVERSED = 0x800000
STATE_SYSTEM_MULTISELECTABLE = 0x1000000
STATE_SYSTEM_EXTSELECTABLE = 0x2000000
# NOTE: HASSUBMENU and ALERT_LOW share the same bit value below.
STATE_SYSTEM_HASSUBMENU = 0x4000000
STATE_SYSTEM_ALERT_LOW = 0x4000000
STATE_SYSTEM_ALERT_MEDIUM = 0x8000000
STATE_SYSTEM_ALERT_HIGH = 0x10000000
STATE_SYSTEM_PROTECTED = 0x20000000
STATE_SYSTEM_HASPOPUP = 0x40000000
# NOTE(review): winuser.h/oleacc define STATE_SYSTEM_VALID as 0x3fffffff;
# 0x1fffffff here differs -- confirm this is intentional before changing.
STATE_SYSTEM_VALID = 0x1fffffff
# Unlocalized state strings
# Maps individual state bit values to their unlocalized constant names.
UNLOCALIZED_STATE_NAMES = {
	1: u'STATE_SYSTEM_UNAVAILABLE',
	2: u'STATE_SYSTEM_SELECTED',
	4: u'STATE_SYSTEM_FOCUSED',
	8: u'STATE_SYSTEM_PRESSED',
	16: u'STATE_SYSTEM_CHECKED',
	32: u'STATE_SYSTEM_MIXED',
	64: u'STATE_SYSTEM_READONLY',
	128: u'STATE_SYSTEM_HOTTRACKED',
	256: u'STATE_SYSTEM_DEFAULT',
	512: u'STATE_SYSTEM_EXPANDED',
	1024: u'STATE_SYSTEM_COLLAPSED',
	2048: u'STATE_SYSTEM_BUSY',
	4096: u'STATE_SYSTEM_FLOATING',
	8192: u'STATE_SYSTEM_MARQUEED',
	16384: u'STATE_SYSTEM_ANIMATED',
	32768: u'STATE_SYSTEM_INVISIBLE',
	65536: u'STATE_SYSTEM_OFFSCREEN',
	131072: u'STATE_SYSTEM_SIZEABLE',
	262144: u'STATE_SYSTEM_MOVEABLE',
	524288: u'STATE_SYSTEM_SELFVOICING',
	1048576: u'STATE_SYSTEM_FOCUSABLE',
	2097152: u'STATE_SYSTEM_SELECTABLE',
	4194304: u'STATE_SYSTEM_LINKED',
	8388608: u'STATE_SYSTEM_TRAVERSED',
	16777216: u'STATE_SYSTEM_MULTISELECTABLE',
	33554432: u'STATE_SYSTEM_EXTSELECTABLE',
	67108864: u'STATE_SYSTEM_ALERT_LOW',
	134217728: u'STATE_SYSTEM_ALERT_MEDIUM',
	268435456: u'STATE_SYSTEM_ALERT_HIGH',
	536870912: u'STATE_SYSTEM_PROTECTED',
	1073741824: u'STATE_SYSTEM_HASPOPUP',
	0x1fffffff: u'STATE_SYSTEM_VALID'}
# IAccessible2 state bit flags (IA2States enum).
IA2_STATE_ACTIVE = 0x1
IA2_STATE_ARMED = 0x2
IA2_STATE_DEFUNCT = 0x4
IA2_STATE_EDITABLE = 0x8
IA2_STATE_HORIZONTAL = 0x10
IA2_STATE_ICONIFIED = 0x20
IA2_STATE_INVALID_ENTRY = 0x40
IA2_STATE_MANAGES_DESCENDANTS = 0x80
IA2_STATE_MODAL = 0x100
IA2_STATE_MULTI_LINE = 0x200
IA2_STATE_OPAQUE = 0x400
IA2_STATE_REQUIRED = 0x800
IA2_STATE_SELECTABLE_TEXT = 0x1000
IA2_STATE_SINGLE_LINE = 0x2000
IA2_STATE_STALE = 0x4000
IA2_STATE_SUPPORTS_AUTOCOMPLETION = 0x8000
IA2_STATE_TRANSIENT = 0x10000
IA2_STATE_VERTICAL = 0x20000
IA2_STATE_CHECKABLE = 0x40000
IA2_STATE_PINNED = 0x80000
# Maps individual IA2 state bit values to their unlocalized constant names.
UNLOCALIZED_IA2_STATE_NAMES = {
	1: u'IA2_STATE_ACTIVE',
	2: u'IA2_STATE_ARMED',
	4: u'IA2_STATE_DEFUNCT',
	8: u'IA2_STATE_EDITABLE',
	16: u'IA2_STATE_HORIZONTAL',
	32: u'IA2_STATE_ICONIFIED',
	64: u'IA2_STATE_INVALID_ENTRY',
	128: u'IA2_STATE_MANAGES_DESCENDANTS',
	256: u'IA2_STATE_MODAL',
	512: u'IA2_STATE_MULTI_LINE',
	1024: u'IA2_STATE_OPAQUE',
	2048: u'IA2_STATE_REQUIRED',
	4096: u'IA2_STATE_SELECTABLE_TEXT',
	8192: u'IA2_STATE_SINGLE_LINE',
	16384: u'IA2_STATE_STALE',
	32768: u'IA2_STATE_SUPPORTS_AUTOCOMPLETION',
	65536: u'IA2_STATE_TRANSIENT',
	131072: u'IA2_STATE_VERTICAL',
	262144: u'IA2_STATE_CHECKABLE',
	524288: u'IA2_STATE_PINNED'}
# Maps IAccessible2 relation type strings (IAccessibleRelation::relationType)
# to unlocalized IA2_RELATION_* identifier names.
# NOTE: the IA2 spec exposes both IA2_RELATION_LABELED_BY and
# IA2_RELATION_LABELLED_BY as names for the same "labelledBy" relation string.
# The original literal listed u'labelledBy' twice; a dict can hold only one
# value per key, so the first entry (LABELED_BY) was dead code silently
# overwritten by the second. The duplicate has been removed; the effective
# LABELLED_BY mapping is kept, so the resulting dict is unchanged.
UNLOCALIZED_IA2_RELATION_TYPES = {
	u'containingApplication' : u'IA2_RELATION_CONTAINING_APPLICATION',
	u'containingDocument' : u'IA2_RELATION_CONTAINING_DOCUMENT',
	u'containingTabPane' : u'IA2_RELATION_CONTAINING_TAB_PANE',
	u'containingWindow' : u'IA2_RELATION_CONTAINING_WINDOW',
	u'controlledBy' : u'IA2_RELATION_CONTROLLED_BY',
	u'controllerFor' : u'IA2_RELATION_CONTROLLER_FOR',
	u'describedBy' : u'IA2_RELATION_DESCRIBED_BY',
	u'descriptionFor' : u'IA2_RELATION_DESCRIPTION_FOR',
	u'details' : u'IA2_RELATION_DETAILS',
	u'detailsFor' : u'IA2_RELATION_DETAILS_FOR',
	u'embeddedBy' : u'IA2_RELATION_EMBEDDED_BY',
	u'embeds' : u'IA2_RELATION_EMBEDS',
	u'errorMessage' : u'IA2_RELATION_ERROR_MESSAGE',
	u'errorFor' : u'IA2_RELATION_ERROR_FOR',
	u'flowsFrom' : u'IA2_RELATION_FLOWS_FROM',
	u'flowsTo' : u'IA2_RELATION_FLOWS_TO',
	u'labelFor' : u'IA2_RELATION_LABEL_FOR',
	u'labelledBy' : u'IA2_RELATION_LABELLED_BY',
	u'memberOf' : u'IA2_RELATION_MEMBER_OF',
	u'nextTabbable' : u'IA2_RELATION_NEXT_TABBABLE',
	u'nodeChildOf' : u'IA2_RELATION_NODE_CHILD_OF',
	u'nodeParentOf' : u'IA2_RELATION_NODE_PARENT_OF',
	u'parentWindowOf' : u'IA2_RELATION_PARENT_WINDOW_OF',
	u'popupFor' : u'IA2_RELATION_POPUP_FOR',
	u'previousTabbable' : u'IA2_RELATION_PREVIOUS_TABBABLE',
	u'subwindowOf' : u'IA2_RELATION_SUBWINDOW_OF'}
# SetWinEventHook() flags
# Context flags controlling how a WinEvent hook is delivered.
WINEVENT_OUTOFCONTEXT = 0x0
WINEVENT_SKIPOWNTHREAD =0x1
WINEVENT_SKIPOWNPROCESS = 0x2
WINEVENT_INCONTEXT = 0x4
#win events
# Event IDs delivered to WinEventProc callbacks (EVENT_SYSTEM_* range).
EVENT_SYSTEM_SOUND = 0x1
EVENT_SYSTEM_ALERT = 0x2
EVENT_SYSTEM_FOREGROUND = 0x3
EVENT_SYSTEM_MENUSTART = 0x4
EVENT_SYSTEM_MENUEND = 0x5
EVENT_SYSTEM_MENUPOPUPSTART = 0x6
EVENT_SYSTEM_MENUPOPUPEND = 0x7
EVENT_SYSTEM_CAPTURESTART = 0x8
EVENT_SYSTEM_CAPTUREEND = 0x9
EVENT_SYSTEM_MOVESIZESTART = 0xa
EVENT_SYSTEM_MOVESIZEEND = 0xb
EVENT_SYSTEM_CONTEXTHELPSTART = 0xc
EVENT_SYSTEM_CONTEXTHELPEND = 0xd
EVENT_SYSTEM_DRAGDROPSTART = 0xe
EVENT_SYSTEM_DRAGDROPEND = 0xf
EVENT_SYSTEM_DIALOGSTART = 0x10
EVENT_SYSTEM_DIALOGEND = 0x11
EVENT_SYSTEM_SCROLLINGSTART = 0x12
EVENT_SYSTEM_SCROLLINGEND = 0x13
EVENT_SYSTEM_SWITCHSTART = 0x14
EVENT_SYSTEM_SWITCHEND = 0x15
EVENT_SYSTEM_MINIMIZESTART = 0x16
EVENT_SYSTEM_MINIMIZEEND = 0x17
# Per-object events (EVENT_OBJECT_* range).
EVENT_OBJECT_CREATE = 0x8000
EVENT_OBJECT_DESTROY = 0x8001
EVENT_OBJECT_SHOW = 0x8002
EVENT_OBJECT_HIDE = 0x8003
EVENT_OBJECT_REORDER = 0x8004
EVENT_OBJECT_FOCUS = 0x8005
EVENT_OBJECT_SELECTION = 0x8006
EVENT_OBJECT_SELECTIONADD = 0x8007
EVENT_OBJECT_SELECTIONREMOVE = 0x8008
EVENT_OBJECT_SELECTIONWITHIN = 0x8009
EVENT_OBJECT_STATECHANGE = 0x800a
EVENT_OBJECT_LOCATIONCHANGE = 0x800b
EVENT_OBJECT_NAMECHANGE = 0x800c
EVENT_OBJECT_DESCRIPTIONCHANGE = 0x800d
EVENT_OBJECT_VALUECHANGE = 0x800e
EVENT_OBJECT_PARENTCHANGE = 0x800f
EVENT_OBJECT_HELPCHANGE = 0x8010
EVENT_OBJECT_DEFACTIONCHANGE = 0x8011
EVENT_OBJECT_ACCELERATORCHANGE = 0x8012
# Console window events (EVENT_CONSOLE_* range).
EVENT_CONSOLE_CARET = 0x4001
EVENT_CONSOLE_UPDATE_REGION = 0x4002
EVENT_CONSOLE_UPDATE_SIMPLE = 0x4003
EVENT_CONSOLE_UPDATE_SCROLL = 0x4004
EVENT_CONSOLE_LAYOUT = 0x4005
EVENT_CONSOLE_START_APPLICATION = 0x4006
EVENT_CONSOLE_END_APPLICATION = 0x4007
# IAccessible2 events
# Values follow the IA2EventID enum (AccessibleEventID.idl), which runs
# sequentially from 0x101.
IA2_EVENT_ACTION_CHANGED = 0x101
# Misspelled alias retained for backward compatibility with existing callers.
IA2_EVENT_ACTIVE_DECENDENT_CHANGED = 0x102
IA2_EVENT_ACTIVE_DESCENDANT_CHANGED = 0x102
IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED = 0x103
IA2_EVENT_DOCUMENT_CONTENT_CHANGED = 0x104
IA2_EVENT_DOCUMENT_LOAD_COMPLETE = 0x105
IA2_EVENT_DOCUMENT_LOAD_STOPPED = 0x106
IA2_EVENT_DOCUMENT_RELOAD = 0x107
IA2_EVENT_HYPERLINK_END_INDEX_CHANGED = 0x108
IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED = 0x109
IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED = 0x10a
IA2_EVENT_HYPERTEXT_LINK_ACTIVATED = 0x10b
IA2_EVENT_HYPERTEXT_LINK_SELECTED = 0x10c
IA2_EVENT_HYPERLINK_START_INDEX_CHANGED = 0x10d
IA2_EVENT_HYPERTEXT_CHANGED = 0x10e
# Bug fix: these two were 0x11f and 0x120, which collided with
# IA2_EVENT_TEXT_REMOVED / IA2_EVENT_TEXT_UPDATED below and disagreed with
# both the IA2EventID enum and UNLOCALIZED_EVENT_NAMES (which maps 0x10f and
# 0x110 to these names).
IA2_EVENT_HYPERTEXT_NLINKS_CHANGED = 0x10f
IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED = 0x110
IA2_EVENT_PAGE_CHANGED = 0x111
IA2_EVENT_SECTION_CHANGED = 0x112
IA2_EVENT_TABLE_CAPTION_CHANGED = 0x113
IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED = 0x114
IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED = 0x115
IA2_EVENT_TABLE_MODEL_CHANGED = 0x116
IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED = 0x117
IA2_EVENT_TABLE_ROW_HEADER_CHANGED = 0x118
IA2_EVENT_TABLE_SUMMARY_CHANGED = 0x119
IA2_EVENT_TEXT_ATTRIBUTE_CHANGED = 0x11a
IA2_EVENT_TEXT_CARET_MOVED = 0x11b
IA2_EVENT_TEXT_CHANGED = 0x11c
IA2_EVENT_TEXT_COLUMN_CHANGED = 0x11d
IA2_EVENT_TEXT_INSERTED = 0x11e
IA2_EVENT_TEXT_REMOVED = 0x11f
IA2_EVENT_TEXT_UPDATED = 0x120
IA2_EVENT_TEXT_SELECTION_CHANGED = 0x121
IA2_EVENT_VISIBLE_DATA_CHANGED = 0x122
# Maps WinEvent / IA2 event ID values to their unlocalized constant names
# (for debugging/logging; not user-facing).
UNLOCALIZED_EVENT_NAMES = {
	0x1: u'EVENT_SYSTEM_SOUND',
	0x2: u'EVENT_SYSTEM_ALERT',
	0x3: u'EVENT_SYSTEM_FOREGROUND',
	0x4: u'EVENT_SYSTEM_MENUSTART',
	0x5: u'EVENT_SYSTEM_MENUEND',
	0x6: u'EVENT_SYSTEM_MENUPOPUPSTART',
	0x7: u'EVENT_SYSTEM_MENUPOPUPEND',
	0x8: u'EVENT_SYSTEM_CAPTURESTART',
	0x9: u'EVENT_SYSTEM_CAPTUREEND',
	0xa: u'EVENT_SYSTEM_MOVESIZESTART',
	0xb: u'EVENT_SYSTEM_MOVESIZEEND',
	0xc: u'EVENT_SYSTEM_CONTEXTHELPSTART',
	0xd: u'EVENT_SYSTEM_CONTEXTHELPEND',
	0xe: u'EVENT_SYSTEM_DRAGDROPSTART',
	0xf: u'EVENT_SYSTEM_DRAGDROPEND',
	0x10: u'EVENT_SYSTEM_DIALOGSTART',
	0x11: u'EVENT_SYSTEM_DIALOGEND',
	0x12: u'EVENT_SYSTEM_SCROLLINGSTART',
	0x13: u'EVENT_SYSTEM_SCROLLINGEND',
	0x14: u'EVENT_SYSTEM_SWITCHSTART',
	0x15: u'EVENT_SYSTEM_SWITCHEND',
	0x16: u'EVENT_SYSTEM_MINIMIZESTART',
	0x17: u'EVENT_SYSTEM_MINIMIZEEND',
	# IAccessible2 events (0x101-0x122).
	0x101: u'IA2_EVENT_ACTION_CHANGED',
	0x102: u'IA2_EVENT_ACTIVE_DESCENDANT_CHANGED',
	0x103: u'IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED',
	0x104: u'IA2_EVENT_DOCUMENT_CONTENT_CHANGED',
	0x105: u'IA2_EVENT_DOCUMENT_LOAD_COMPLETE',
	0x106: u'IA2_EVENT_DOCUMENT_LOAD_STOPPED',
	0x107: u'IA2_EVENT_DOCUMENT_RELOAD',
	0x108: u'IA2_EVENT_HYPERLINK_END_INDEX_CHANGED',
	0x109: u'IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED',
	0x10a: u'IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED',
	0x10b: u'IA2_EVENT_HYPERTEXT_LINK_ACTIVATED',
	0x10c: u'IA2_EVENT_HYPERTEXT_LINK_SELECTED',
	0x10d: u'IA2_EVENT_HYPERLINK_START_INDEX_CHANGED',
	0x10e: u'IA2_EVENT_HYPERTEXT_CHANGED',
	0x10f: u'IA2_EVENT_HYPERTEXT_NLINKS_CHANGED',
	0x110: u'IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED',
	0x111: u'IA2_EVENT_PAGE_CHANGED',
	0x112: u'IA2_EVENT_SECTION_CHANGED',
	0x113: u'IA2_EVENT_TABLE_CAPTION_CHANGED',
	0x114: u'IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED',
	0x115: u'IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED',
	0x116: u'IA2_EVENT_TABLE_MODEL_CHANGED',
	0x117: u'IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED',
	0x118: u'IA2_EVENT_TABLE_ROW_HEADER_CHANGED',
	0x119: u'IA2_EVENT_TABLE_SUMMARY_CHANGED',
	0x11a: u'IA2_EVENT_TEXT_ATTRIBUTE_CHANGED',
	0x11b: u'IA2_EVENT_TEXT_CARET_MOVED',
	0x11c: u'IA2_EVENT_TEXT_CHANGED',
	0x11d: u'IA2_EVENT_TEXT_COLUMN_CHANGED',
	0x11e: u'IA2_EVENT_TEXT_INSERTED',
	0x11f: u'IA2_EVENT_TEXT_REMOVED',
	0x120: u'IA2_EVENT_TEXT_UPDATED',
	0x121: u'IA2_EVENT_TEXT_SELECTION_CHANGED',
	0x122: u'IA2_EVENT_VISIBLE_DATA_CHANGED',
	# Console events.
	0x4001: u'EVENT_CONSOLE_CARET',
	0x4002: u'EVENT_CONSOLE_UPDATE_REGION',
	0x4003: u'EVENT_CONSOLE_UPDATE_SIMPLE',
	0x4004: u'EVENT_CONSOLE_UPDATE_SCROLL',
	0x4005: u'EVENT_CONSOLE_LAYOUT',
	0x4006: u'EVENT_CONSOLE_START_APPLICATION',
	0x4007: u'EVENT_CONSOLE_END_APPLICATION',
	# Object events.
	0x8000: u'EVENT_OBJECT_CREATE',
	0x8001: u'EVENT_OBJECT_DESTROY',
	0x8002: u'EVENT_OBJECT_SHOW',
	0x8003: u'EVENT_OBJECT_HIDE',
	0x8004: u'EVENT_OBJECT_REORDER',
	0x8005: u'EVENT_OBJECT_FOCUS',
	0x8006: u'EVENT_OBJECT_SELECTION',
	0x8007: u'EVENT_OBJECT_SELECTIONADD',
	0x8008: u'EVENT_OBJECT_SELECTIONREMOVE',
	0x8009: u'EVENT_OBJECT_SELECTIONWITHIN',
	0x800a: u'EVENT_OBJECT_STATECHANGE',
	0x800b: u'EVENT_OBJECT_LOCATIONCHANGE',
	0x800c: u'EVENT_OBJECT_NAMECHANGE',
	0x800d: u'EVENT_OBJECT_DESCRIPTIONCHANGE',
	0x800e: u'EVENT_OBJECT_VALUECHANGE',
	0x800f: u'EVENT_OBJECT_PARENTCHANGE',
	0x8010: u'EVENT_OBJECT_HELPCHANGE',
	0x8011: u'EVENT_OBJECT_DEFACTIONCHANGE',
	0x8012: u'EVENT_OBJECT_ACCELERATORCHANGE'}
# Reverse lookup: event ID value -> constant name, built from every EVENT_* /
# IA2_EVENT_* name defined above.
# Bug fix: iterate over a snapshot of locals().items(). The original iterated
# the live items() view; binding the loop targets (_sym, _val) adds new keys
# to the module namespace mid-iteration, which raises
# "RuntimeError: dictionary changed size during iteration" on Python 3.
winEventIDsToEventNames = {}
for _sym, _val in list(locals().items()):
	if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):
		winEventIDsToEventNames[_val] = _sym
|
flexible
|
{
"blob_id": "5ec2ac3e0d66026da1b0c957d10c95e95c201f8f",
"index": 9032,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _sym, _val in locals().items():\n if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):\n winEventIDsToEventNames[_val] = _sym\n",
"step-3": "<mask token>\nCHILDID_SELF = 0\nIA2_TEXT_OFFSET_LENGTH = -1\nIA2_TEXT_OFFSET_CARET = -2\nROLE_SYSTEM_ALERT = 8\nROLE_SYSTEM_ANIMATION = 54\nROLE_SYSTEM_APPLICATION = 14\nROLE_SYSTEM_BORDER = 19\nROLE_SYSTEM_BUTTONDROPDOWN = 56\nROLE_SYSTEM_BUTTONDROPDOWNGRID = 58\nROLE_SYSTEM_BUTTONMENU = 57\nROLE_SYSTEM_CARET = 7\nROLE_SYSTEM_CELL = 29\nROLE_SYSTEM_CHARACTER = 32\nROLE_SYSTEM_CHART = 17\nROLE_SYSTEM_CHECKBUTTON = 44\nROLE_SYSTEM_CLIENT = 10\nROLE_SYSTEM_CLOCK = 61\nROLE_SYSTEM_COLUMN = 27\nROLE_SYSTEM_COLUMNHEADER = 25\nROLE_SYSTEM_COMBOBOX = 46\nROLE_SYSTEM_CURSOR = 6\nROLE_SYSTEM_DIAGRAM = 53\nROLE_SYSTEM_DIAL = 49\nROLE_SYSTEM_DIALOG = 18\nROLE_SYSTEM_DOCUMENT = 15\nROLE_SYSTEM_DROPLIST = 47\nROLE_SYSTEM_EQUATION = 55\nROLE_SYSTEM_GRAPHIC = 40\nROLE_SYSTEM_GRIP = 4\nROLE_SYSTEM_GROUPING = 20\nROLE_SYSTEM_HELPBALLOON = 31\nROLE_SYSTEM_HOTKEYFIELD = 50\nROLE_SYSTEM_INDICATOR = 39\nROLE_SYSTEM_LINK = 30\nROLE_SYSTEM_LIST = 33\nROLE_SYSTEM_LISTITEM = 34\nROLE_SYSTEM_MENUBAR = 2\nROLE_SYSTEM_MENUITEM = 12\nROLE_SYSTEM_MENUPOPUP = 11\nROLE_SYSTEM_OUTLINE = 35\nROLE_SYSTEM_OUTLINEITEM = 36\nROLE_SYSTEM_PAGETAB = 37\nROLE_SYSTEM_PAGETABLIST = 60\nROLE_SYSTEM_PANE = 16\nROLE_SYSTEM_PROGRESSBAR = 48\nROLE_SYSTEM_PROPERTYPAGE = 38\nROLE_SYSTEM_PUSHBUTTON = 43\nROLE_SYSTEM_RADIOBUTTON = 45\nROLE_SYSTEM_ROW = 28\nROLE_SYSTEM_ROWHEADER = 26\nROLE_SYSTEM_SCROLLBAR = 3\nROLE_SYSTEM_SEPARATOR = 21\nROLE_SYSTEM_SLIDER = 51\nROLE_SYSTEM_SOUND = 5\nROLE_SYSTEM_SPINBUTTON = 52\nROLE_SYSTEM_STATICTEXT = 41\nROLE_SYSTEM_STATUSBAR = 23\nROLE_SYSTEM_TABLE = 24\nROLE_SYSTEM_TEXT = 42\nROLE_SYSTEM_TITLEBAR = 1\nROLE_SYSTEM_TOOLBAR = 22\nROLE_SYSTEM_TOOLTIP = 13\nROLE_SYSTEM_WHITESPACE = 59\nROLE_SYSTEM_WINDOW = 9\nIA2_ROLE_UNKNOWN = 0\nIA2_ROLE_CANVAS = 1025\nIA2_ROLE_CAPTION = 1026\nIA2_ROLE_CHECK_MENU_ITEM = 1027\nIA2_ROLE_COLOR_CHOOSER = 1028\nIA2_ROLE_DATE_EDITOR = 1029\nIA2_ROLE_DESKTOP_ICON = 1030\nIA2_ROLE_DESKTOP_PANE = 1031\nIA2_ROLE_DIRECTORY_PANE = 
1032\nIA2_ROLE_EDITBAR = 1033\nIA2_ROLE_EMBEDDED_OBJECT = 1034\nIA2_ROLE_ENDNOTE = 1035\nIA2_ROLE_FILE_CHOOSER = 1036\nIA2_ROLE_FONT_CHOOSER = 1037\nIA2_ROLE_FOOTER = 1038\nIA2_ROLE_FOOTNOTE = 1039\nIA2_ROLE_FORM = 1040\nIA2_ROLE_FRAME = 1041\nIA2_ROLE_GLASS_PANE = 1042\nIA2_ROLE_HEADER = 1043\nIA2_ROLE_HEADING = 1044\nIA2_ROLE_ICON = 1045\nIA2_ROLE_IMAGE_MAP = 1046\nIA2_ROLE_INPUT_METHOD_WINDOW = 1047\nIA2_ROLE_INTERNAL_FRAME = 1048\nIA2_ROLE_LABEL = 1049\nIA2_ROLE_LAYERED_PANE = 1050\nIA2_ROLE_NOTE = 1051\nIA2_ROLE_OPTION_PANE = 1052\nIA2_ROLE_PAGE = 1053\nIA2_ROLE_PARAGRAPH = 1054\nIA2_ROLE_RADIO_MENU_ITEM = 1055\nIA2_ROLE_REDUNDANT_OBJECT = 1056\nIA2_ROLE_ROOT_PANE = 1057\nIA2_ROLE_RULER = 1058\nIA2_ROLE_SCROLL_PANE = 1059\nIA2_ROLE_SECTION = 1060\nIA2_ROLE_SHAPE = 1061\nIA2_ROLE_SPLIT_PANE = 1062\nIA2_ROLE_TEAR_OFF_MENU = 1063\nIA2_ROLE_TERMINAL = 1064\nIA2_ROLE_TEXT_FRAME = 1065\nIA2_ROLE_TOGGLE_BUTTON = 1066\nIA2_ROLE_VIEW_PORT = 1067\nIA2_ROLE_COMPLEMENTARY_CONTENT = 1068\nIA2_ROLE_LANDMARK = 1069\nUNLOCALIZED_ROLE_NAMES = {(1): u'ROLE_SYSTEM_TITLEBAR', (2):\n u'ROLE_SYSTEM_MENUBAR', (3): u'ROLE_SYSTEM_SCROLLBAR', (4):\n u'ROLE_SYSTEM_GRIP', (5): u'ROLE_SYSTEM_SOUND', (6):\n u'ROLE_SYSTEM_CURSOR', (7): u'ROLE_SYSTEM_CARET', (8):\n u'ROLE_SYSTEM_ALERT', (9): u'ROLE_SYSTEM_WINDOW', (10):\n u'ROLE_SYSTEM_CLIENT', (11): u'ROLE_SYSTEM_MENUPOPUP', (12):\n u'ROLE_SYSTEM_MENUITEM', (13): u'ROLE_SYSTEM_TOOLTIP', (14):\n u'ROLE_SYSTEM_APPLICATION', (15): u'ROLE_SYSTEM_DOCUMENT', (16):\n u'ROLE_SYSTEM_PANE', (17): u'ROLE_SYSTEM_CHART', (18):\n u'ROLE_SYSTEM_DIALOG', (19): u'ROLE_SYSTEM_BORDER', (20):\n u'ROLE_SYSTEM_GROUPING', (21): u'ROLE_SYSTEM_SEPARATOR', (22):\n u'ROLE_SYSTEM_TOOLBAR', (23): u'ROLE_SYSTEM_STATUSBAR', (24):\n u'ROLE_SYSTEM_TABLE', (25): u'ROLE_SYSTEM_COLUMNHEADER', (26):\n u'ROLE_SYSTEM_ROWHEADER', (27): u'ROLE_SYSTEM_COLUMN', (28):\n u'ROLE_SYSTEM_ROW', (29): u'ROLE_SYSTEM_CELL', (30):\n u'ROLE_SYSTEM_LINK', (31): u'ROLE_SYSTEM_HELPBALLOON', 
(32):\n u'ROLE_SYSTEM_CHARACTER', (33): u'ROLE_SYSTEM_LIST', (34):\n u'ROLE_SYSTEM_LISTITEM', (35): u'ROLE_SYSTEM_OUTLINE', (36):\n u'ROLE_SYSTEM_OUTLINEITEM', (37): u'ROLE_SYSTEM_PAGETAB', (38):\n u'ROLE_SYSTEM_PROPERTYPAGE', (39): u'ROLE_SYSTEM_INDICATOR', (40):\n u'ROLE_SYSTEM_GRAPHIC', (41): u'ROLE_SYSTEM_STATICTEXT', (42):\n u'ROLE_SYSTEM_TEXT', (43): u'ROLE_SYSTEM_PUSHBUTTON', (44):\n u'ROLE_SYSTEM_CHECKBUTTON', (45): u'ROLE_SYSTEM_RADIOBUTTON', (46):\n u'ROLE_SYSTEM_COMBOBOX', (47): u'ROLE_SYSTEM_DROPLIST', (48):\n u'ROLE_SYSTEM_PROGRESSBAR', (49): u'ROLE_SYSTEM_DIAL', (50):\n u'ROLE_SYSTEM_HOTKEYFIELD', (51): u'ROLE_SYSTEM_SLIDER', (52):\n u'ROLE_SYSTEM_SPINBUTTON', (53): u'ROLE_SYSTEM_DIAGRAM', (54):\n u'ROLE_SYSTEM_ANIMATION', (55): u'ROLE_SYSTEM_EQUATION', (56):\n u'ROLE_SYSTEM_BUTTONDROPDOWN', (57): u'ROLE_SYSTEM_BUTTONMENU', (58):\n u'ROLE_SYSTEM_BUTTONDROPDOWNGRID', (59): u'ROLE_SYSTEM_WHITESPACE', (60\n ): u'ROLE_SYSTEM_PAGETABLIST', (61): u'ROLE_SYSTEM_CLOCK'}\nUNLOCALIZED_IA2_ROLE_NAMES = {(0): u'IA2_ROLE_UNKNOWN', (1025):\n u'IA2_ROLE_CANVAS', (1026): u'IA2_ROLE_CAPTION', (1027):\n u'IA2_ROLE_CHECK_MENU_ITEM', (1028): u'IA2_ROLE_COLOR_CHOOSER', (1029):\n u'IA2_ROLE_DATE_EDITOR', (1030): u'IA2_ROLE_DESKTOP_ICON', (1031):\n u'IA2_ROLE_DESKTOP_PANE', (1032): u'IA2_ROLE_DIRECTORY_PANE', (1033):\n u'IA2_ROLE_EDITBAR', (1034): u'IA2_ROLE_EMBEDDED_OBJECT', (1035):\n u'IA2_ROLE_ENDNOTE', (1036): u'IA2_ROLE_FILE_CHOOSER', (1037):\n u'IA2_ROLE_FONT_CHOOSER', (1038): u'IA2_ROLE_FOOTER', (1039):\n u'IA2_ROLE_FOOTNOTE', (1040): u'IA2_ROLE_FORM', (1041):\n u'IA2_ROLE_FRAME', (1042): u'IA2_ROLE_GLASS_PANE', (1043):\n u'IA2_ROLE_HEADER', (1044): u'IA2_ROLE_HEADING', (1045):\n u'IA2_ROLE_ICON', (1046): u'IA2_ROLE_IMAGE_MAP', (1047):\n u'IA2_ROLE_INPUT_METHOD_WINDOW', (1048): u'IA2_ROLE_INTERNAL_FRAME', (\n 1049): u'IA2_ROLE_LABEL', (1050): u'IA2_ROLE_LAYERED_PANE', (1051):\n u'IA2_ROLE_NOTE', (1052): u'IA2_ROLE_OPTION_PANE', (1053):\n u'IA2_ROLE_PAGE', (1054): 
u'IA2_ROLE_PARAGRAPH', (1055):\n u'IA2_ROLE_RADIO_MENU_ITEM', (1056): u'IA2_ROLE_REDUNDANT_OBJECT', (\n 1057): u'IA2_ROLE_ROOT_PANE', (1058): u'IA2_ROLE_RULER', (1059):\n u'IA2_ROLE_SCROLL_PANE', (1060): u'IA2_ROLE_SECTION', (1061):\n u'IA2_ROLE_SHAPE', (1062): u'IA2_ROLE_SPLIT_PANE', (1063):\n u'IA2_ROLE_TEAR_OFF_MENU', (1064): u'IA2_ROLE_TERMINAL', (1065):\n u'IA2_ROLE_TEXT_FRAME', (1066): u'IA2_ROLE_TOGGLE_BUTTON', (1067):\n u'IA2_ROLE_VIEW_PORT', (1068): u'IA2_ROLE_COMPLEMENTARY_CONTENT', (1069\n ): u'IA2_ROLE_LANDMARK'}\nNAVDIR_DOWN = 2\nNAVDIR_FIRSTCHILD = 7\nNAVDIR_LASTCHILD = 8\nNAVDIR_LEFT = 3\nNAVDIR_NEXT = 5\nNAVDIR_PREVIOUS = 6\nNAVDIR_RIGHT = 4\nNAVDIR_UP = 1\nSTATE_SYSTEM_UNAVAILABLE = 1\nSTATE_SYSTEM_SELECTED = 2\nSTATE_SYSTEM_FOCUSED = 4\nSTATE_SYSTEM_PRESSED = 8\nSTATE_SYSTEM_CHECKED = 16\nSTATE_SYSTEM_MIXED = 32\nSTATE_SYSTEM_READONLY = 64\nSTATE_SYSTEM_HOTTRACKED = 128\nSTATE_SYSTEM_DEFAULT = 256\nSTATE_SYSTEM_EXPANDED = 512\nSTATE_SYSTEM_COLLAPSED = 1024\nSTATE_SYSTEM_BUSY = 2048\nSTATE_SYSTEM_FLOATING = 4096\nSTATE_SYSTEM_MARQUEED = 8192\nSTATE_SYSTEM_ANIMATED = 16384\nSTATE_SYSTEM_INVISIBLE = 32768\nSTATE_SYSTEM_OFFSCREEN = 65536\nSTATE_SYSTEM_SIZEABLE = 131072\nSTATE_SYSTEM_MOVEABLE = 262144\nSTATE_SYSTEM_SELFVOICING = 524288\nSTATE_SYSTEM_FOCUSABLE = 1048576\nSTATE_SYSTEM_SELECTABLE = 2097152\nSTATE_SYSTEM_LINKED = 4194304\nSTATE_SYSTEM_TRAVERSED = 8388608\nSTATE_SYSTEM_MULTISELECTABLE = 16777216\nSTATE_SYSTEM_EXTSELECTABLE = 33554432\nSTATE_SYSTEM_HASSUBMENU = 67108864\nSTATE_SYSTEM_ALERT_LOW = 67108864\nSTATE_SYSTEM_ALERT_MEDIUM = 134217728\nSTATE_SYSTEM_ALERT_HIGH = 268435456\nSTATE_SYSTEM_PROTECTED = 536870912\nSTATE_SYSTEM_HASPOPUP = 1073741824\nSTATE_SYSTEM_VALID = 536870911\nUNLOCALIZED_STATE_NAMES = {(1): u'STATE_SYSTEM_UNAVAILABLE', (2):\n u'STATE_SYSTEM_SELECTED', (4): u'STATE_SYSTEM_FOCUSED', (8):\n u'STATE_SYSTEM_PRESSED', (16): u'STATE_SYSTEM_CHECKED', (32):\n u'STATE_SYSTEM_MIXED', (64): u'STATE_SYSTEM_READONLY', (128):\n 
u'STATE_SYSTEM_HOTTRACKED', (256): u'STATE_SYSTEM_DEFAULT', (512):\n u'STATE_SYSTEM_EXPANDED', (1024): u'STATE_SYSTEM_COLLAPSED', (2048):\n u'STATE_SYSTEM_BUSY', (4096): u'STATE_SYSTEM_FLOATING', (8192):\n u'STATE_SYSTEM_MARQUEED', (16384): u'STATE_SYSTEM_ANIMATED', (32768):\n u'STATE_SYSTEM_INVISIBLE', (65536): u'STATE_SYSTEM_OFFSCREEN', (131072):\n u'STATE_SYSTEM_SIZEABLE', (262144): u'STATE_SYSTEM_MOVEABLE', (524288):\n u'STATE_SYSTEM_SELFVOICING', (1048576): u'STATE_SYSTEM_FOCUSABLE', (\n 2097152): u'STATE_SYSTEM_SELECTABLE', (4194304): u'STATE_SYSTEM_LINKED',\n (8388608): u'STATE_SYSTEM_TRAVERSED', (16777216):\n u'STATE_SYSTEM_MULTISELECTABLE', (33554432):\n u'STATE_SYSTEM_EXTSELECTABLE', (67108864): u'STATE_SYSTEM_ALERT_LOW', (\n 134217728): u'STATE_SYSTEM_ALERT_MEDIUM', (268435456):\n u'STATE_SYSTEM_ALERT_HIGH', (536870912): u'STATE_SYSTEM_PROTECTED', (\n 1073741824): u'STATE_SYSTEM_HASPOPUP', (536870911): u'STATE_SYSTEM_VALID'}\nIA2_STATE_ACTIVE = 1\nIA2_STATE_ARMED = 2\nIA2_STATE_DEFUNCT = 4\nIA2_STATE_EDITABLE = 8\nIA2_STATE_HORIZONTAL = 16\nIA2_STATE_ICONIFIED = 32\nIA2_STATE_INVALID_ENTRY = 64\nIA2_STATE_MANAGES_DESCENDANTS = 128\nIA2_STATE_MODAL = 256\nIA2_STATE_MULTI_LINE = 512\nIA2_STATE_OPAQUE = 1024\nIA2_STATE_REQUIRED = 2048\nIA2_STATE_SELECTABLE_TEXT = 4096\nIA2_STATE_SINGLE_LINE = 8192\nIA2_STATE_STALE = 16384\nIA2_STATE_SUPPORTS_AUTOCOMPLETION = 32768\nIA2_STATE_TRANSIENT = 65536\nIA2_STATE_VERTICAL = 131072\nIA2_STATE_CHECKABLE = 262144\nIA2_STATE_PINNED = 524288\nUNLOCALIZED_IA2_STATE_NAMES = {(1): u'IA2_STATE_ACTIVE', (2):\n u'IA2_STATE_ARMED', (4): u'IA2_STATE_DEFUNCT', (8):\n u'IA2_STATE_EDITABLE', (16): u'IA2_STATE_HORIZONTAL', (32):\n u'IA2_STATE_ICONIFIED', (64): u'IA2_STATE_INVALID_ENTRY', (128):\n u'IA2_STATE_MANAGES_DESCENDANTS', (256): u'IA2_STATE_MODAL', (512):\n u'IA2_STATE_MULTI_LINE', (1024): u'IA2_STATE_OPAQUE', (2048):\n u'IA2_STATE_REQUIRED', (4096): u'IA2_STATE_SELECTABLE_TEXT', (8192):\n u'IA2_STATE_SINGLE_LINE', (16384): 
u'IA2_STATE_STALE', (32768):\n u'IA2_STATE_SUPPORTS_AUTOCOMPLETION', (65536): u'IA2_STATE_TRANSIENT',\n (131072): u'IA2_STATE_VERTICAL', (262144): u'IA2_STATE_CHECKABLE', (\n 524288): u'IA2_STATE_PINNED'}\nUNLOCALIZED_IA2_RELATION_TYPES = {u'containingApplication':\n u'IA2_RELATION_CONTAINING_APPLICATION', u'containingDocument':\n u'IA2_RELATION_CONTAINING_DOCUMENT', u'containingTabPane':\n u'IA2_RELATION_CONTAINING_TAB_PANE', u'containingWindow':\n u'IA2_RELATION_CONTAINING_WINDOW', u'controlledBy':\n u'IA2_RELATION_CONTROLLED_BY', u'controllerFor':\n u'IA2_RELATION_CONTROLLER_FOR', u'describedBy':\n u'IA2_RELATION_DESCRIBED_BY', u'descriptionFor':\n u'IA2_RELATION_DESCRIPTION_FOR', u'details': u'IA2_RELATION_DETAILS',\n u'detailsFor': u'IA2_RELATION_DETAILS_FOR', u'embeddedBy':\n u'IA2_RELATION_EMBEDDED_BY', u'embeds': u'IA2_RELATION_EMBEDS',\n u'errorMessage': u'IA2_RELATION_ERROR_MESSAGE', u'errorFor':\n u'IA2_RELATION_ERROR_FOR', u'flowsFrom': u'IA2_RELATION_FLOWS_FROM',\n u'flowsTo': u'IA2_RELATION_FLOWS_TO', u'labelFor':\n u'IA2_RELATION_LABEL_FOR', u'labelledBy': u'IA2_RELATION_LABELED_BY',\n u'labelledBy': u'IA2_RELATION_LABELLED_BY', u'memberOf':\n u'IA2_RELATION_MEMBER_OF', u'nextTabbable':\n u'IA2_RELATION_NEXT_TABBABLE', u'nodeChildOf':\n u'IA2_RELATION_NODE_CHILD_OF', u'nodeParentOf':\n u'IA2_RELATION_NODE_PARENT_OF', u'parentWindowOf':\n u'IA2_RELATION_PARENT_WINDOW_OF', u'popupFor':\n u'IA2_RELATION_POPUP_FOR', u'previousTabbable':\n u'IA2_RELATION_PREVIOUS_TABBABLE', u'subwindowOf':\n u'IA2_RELATION_SUBWINDOW_OF'}\nWINEVENT_OUTOFCONTEXT = 0\nWINEVENT_SKIPOWNTHREAD = 1\nWINEVENT_SKIPOWNPROCESS = 2\nWINEVENT_INCONTEXT = 4\nEVENT_SYSTEM_SOUND = 1\nEVENT_SYSTEM_ALERT = 2\nEVENT_SYSTEM_FOREGROUND = 3\nEVENT_SYSTEM_MENUSTART = 4\nEVENT_SYSTEM_MENUEND = 5\nEVENT_SYSTEM_MENUPOPUPSTART = 6\nEVENT_SYSTEM_MENUPOPUPEND = 7\nEVENT_SYSTEM_CAPTURESTART = 8\nEVENT_SYSTEM_CAPTUREEND = 9\nEVENT_SYSTEM_MOVESIZESTART = 10\nEVENT_SYSTEM_MOVESIZEEND = 
11\nEVENT_SYSTEM_CONTEXTHELPSTART = 12\nEVENT_SYSTEM_CONTEXTHELPEND = 13\nEVENT_SYSTEM_DRAGDROPSTART = 14\nEVENT_SYSTEM_DRAGDROPEND = 15\nEVENT_SYSTEM_DIALOGSTART = 16\nEVENT_SYSTEM_DIALOGEND = 17\nEVENT_SYSTEM_SCROLLINGSTART = 18\nEVENT_SYSTEM_SCROLLINGEND = 19\nEVENT_SYSTEM_SWITCHSTART = 20\nEVENT_SYSTEM_SWITCHEND = 21\nEVENT_SYSTEM_MINIMIZESTART = 22\nEVENT_SYSTEM_MINIMIZEEND = 23\nEVENT_OBJECT_CREATE = 32768\nEVENT_OBJECT_DESTROY = 32769\nEVENT_OBJECT_SHOW = 32770\nEVENT_OBJECT_HIDE = 32771\nEVENT_OBJECT_REORDER = 32772\nEVENT_OBJECT_FOCUS = 32773\nEVENT_OBJECT_SELECTION = 32774\nEVENT_OBJECT_SELECTIONADD = 32775\nEVENT_OBJECT_SELECTIONREMOVE = 32776\nEVENT_OBJECT_SELECTIONWITHIN = 32777\nEVENT_OBJECT_STATECHANGE = 32778\nEVENT_OBJECT_LOCATIONCHANGE = 32779\nEVENT_OBJECT_NAMECHANGE = 32780\nEVENT_OBJECT_DESCRIPTIONCHANGE = 32781\nEVENT_OBJECT_VALUECHANGE = 32782\nEVENT_OBJECT_PARENTCHANGE = 32783\nEVENT_OBJECT_HELPCHANGE = 32784\nEVENT_OBJECT_DEFACTIONCHANGE = 32785\nEVENT_OBJECT_ACCELERATORCHANGE = 32786\nEVENT_CONSOLE_CARET = 16385\nEVENT_CONSOLE_UPDATE_REGION = 16386\nEVENT_CONSOLE_UPDATE_SIMPLE = 16387\nEVENT_CONSOLE_UPDATE_SCROLL = 16388\nEVENT_CONSOLE_LAYOUT = 16389\nEVENT_CONSOLE_START_APPLICATION = 16390\nEVENT_CONSOLE_END_APPLICATION = 16391\nIA2_EVENT_ACTION_CHANGED = 257\nIA2_EVENT_ACTIVE_DECENDENT_CHANGED = 258\nIA2_EVENT_ACTIVE_DESCENDANT_CHANGED = 258\nIA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED = 259\nIA2_EVENT_DOCUMENT_CONTENT_CHANGED = 260\nIA2_EVENT_DOCUMENT_LOAD_COMPLETE = 261\nIA2_EVENT_DOCUMENT_LOAD_STOPPED = 262\nIA2_EVENT_DOCUMENT_RELOAD = 263\nIA2_EVENT_HYPERLINK_END_INDEX_CHANGED = 264\nIA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED = 265\nIA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED = 266\nIA2_EVENT_HYPERTEXT_LINK_ACTIVATED = 267\nIA2_EVENT_HYPERTEXT_LINK_SELECTED = 268\nIA2_EVENT_HYPERLINK_START_INDEX_CHANGED = 269\nIA2_EVENT_HYPERTEXT_CHANGED = 270\nIA2_EVENT_HYPERTEXT_NLINKS_CHANGED = 287\nIA2_EVENT_OBJECT_ATTRIBUTE_CHANGED = 
288\nIA2_EVENT_PAGE_CHANGED = 273\nIA2_EVENT_SECTION_CHANGED = 274\nIA2_EVENT_TABLE_CAPTION_CHANGED = 275\nIA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED = 276\nIA2_EVENT_TABLE_COLUMN_HEADER_CHANGED = 277\nIA2_EVENT_TABLE_MODEL_CHANGED = 278\nIA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED = 279\nIA2_EVENT_TABLE_ROW_HEADER_CHANGED = 280\nIA2_EVENT_TABLE_SUMMARY_CHANGED = 281\nIA2_EVENT_TEXT_ATTRIBUTE_CHANGED = 282\nIA2_EVENT_TEXT_CARET_MOVED = 283\nIA2_EVENT_TEXT_CHANGED = 284\nIA2_EVENT_TEXT_COLUMN_CHANGED = 285\nIA2_EVENT_TEXT_INSERTED = 286\nIA2_EVENT_TEXT_REMOVED = 287\nIA2_EVENT_TEXT_UPDATED = 288\nIA2_EVENT_TEXT_SELECTION_CHANGED = 289\nIA2_EVENT_VISIBLE_DATA_CHANGED = 290\nUNLOCALIZED_EVENT_NAMES = {(1): u'EVENT_SYSTEM_SOUND', (2):\n u'EVENT_SYSTEM_ALERT', (3): u'EVENT_SYSTEM_FOREGROUND', (4):\n u'EVENT_SYSTEM_MENUSTART', (5): u'EVENT_SYSTEM_MENUEND', (6):\n u'EVENT_SYSTEM_MENUPOPUPSTART', (7): u'EVENT_SYSTEM_MENUPOPUPEND', (8):\n u'EVENT_SYSTEM_CAPTURESTART', (9): u'EVENT_SYSTEM_CAPTUREEND', (10):\n u'EVENT_SYSTEM_MOVESIZESTART', (11): u'EVENT_SYSTEM_MOVESIZEEND', (12):\n u'EVENT_SYSTEM_CONTEXTHELPSTART', (13): u'EVENT_SYSTEM_CONTEXTHELPEND',\n (14): u'EVENT_SYSTEM_DRAGDROPSTART', (15): u'EVENT_SYSTEM_DRAGDROPEND',\n (16): u'EVENT_SYSTEM_DIALOGSTART', (17): u'EVENT_SYSTEM_DIALOGEND', (18\n ): u'EVENT_SYSTEM_SCROLLINGSTART', (19): u'EVENT_SYSTEM_SCROLLINGEND',\n (20): u'EVENT_SYSTEM_SWITCHSTART', (21): u'EVENT_SYSTEM_SWITCHEND', (22\n ): u'EVENT_SYSTEM_MINIMIZESTART', (23): u'EVENT_SYSTEM_MINIMIZEEND', (\n 257): u'IA2_EVENT_ACTION_CHANGED', (258):\n u'IA2_EVENT_ACTIVE_DESCENDANT_CHANGED', (259):\n u'IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED', (260):\n u'IA2_EVENT_DOCUMENT_CONTENT_CHANGED', (261):\n u'IA2_EVENT_DOCUMENT_LOAD_COMPLETE', (262):\n u'IA2_EVENT_DOCUMENT_LOAD_STOPPED', (263): u'IA2_EVENT_DOCUMENT_RELOAD',\n (264): u'IA2_EVENT_HYPERLINK_END_INDEX_CHANGED', (265):\n u'IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED', (266):\n 
u'IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED', (267):\n u'IA2_EVENT_HYPERTEXT_LINK_ACTIVATED', (268):\n u'IA2_EVENT_HYPERTEXT_LINK_SELECTED', (269):\n u'IA2_EVENT_HYPERLINK_START_INDEX_CHANGED', (270):\n u'IA2_EVENT_HYPERTEXT_CHANGED', (271):\n u'IA2_EVENT_HYPERTEXT_NLINKS_CHANGED', (272):\n u'IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED', (273): u'IA2_EVENT_PAGE_CHANGED',\n (274): u'IA2_EVENT_SECTION_CHANGED', (275):\n u'IA2_EVENT_TABLE_CAPTION_CHANGED', (276):\n u'IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED', (277):\n u'IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED', (278):\n u'IA2_EVENT_TABLE_MODEL_CHANGED', (279):\n u'IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED', (280):\n u'IA2_EVENT_TABLE_ROW_HEADER_CHANGED', (281):\n u'IA2_EVENT_TABLE_SUMMARY_CHANGED', (282):\n u'IA2_EVENT_TEXT_ATTRIBUTE_CHANGED', (283):\n u'IA2_EVENT_TEXT_CARET_MOVED', (284): u'IA2_EVENT_TEXT_CHANGED', (285):\n u'IA2_EVENT_TEXT_COLUMN_CHANGED', (286): u'IA2_EVENT_TEXT_INSERTED', (\n 287): u'IA2_EVENT_TEXT_REMOVED', (288): u'IA2_EVENT_TEXT_UPDATED', (289\n ): u'IA2_EVENT_TEXT_SELECTION_CHANGED', (290):\n u'IA2_EVENT_VISIBLE_DATA_CHANGED', (16385): u'EVENT_CONSOLE_CARET', (\n 16386): u'EVENT_CONSOLE_UPDATE_REGION', (16387):\n u'EVENT_CONSOLE_UPDATE_SIMPLE', (16388): u'EVENT_CONSOLE_UPDATE_SCROLL',\n (16389): u'EVENT_CONSOLE_LAYOUT', (16390):\n u'EVENT_CONSOLE_START_APPLICATION', (16391):\n u'EVENT_CONSOLE_END_APPLICATION', (32768): u'EVENT_OBJECT_CREATE', (\n 32769): u'EVENT_OBJECT_DESTROY', (32770): u'EVENT_OBJECT_SHOW', (32771):\n u'EVENT_OBJECT_HIDE', (32772): u'EVENT_OBJECT_REORDER', (32773):\n u'EVENT_OBJECT_FOCUS', (32774): u'EVENT_OBJECT_SELECTION', (32775):\n u'EVENT_OBJECT_SELECTIONADD', (32776): u'EVENT_OBJECT_SELECTIONREMOVE',\n (32777): u'EVENT_OBJECT_SELECTIONWITHIN', (32778):\n u'EVENT_OBJECT_STATECHANGE', (32779): u'EVENT_OBJECT_LOCATIONCHANGE', (\n 32780): u'EVENT_OBJECT_NAMECHANGE', (32781):\n u'EVENT_OBJECT_DESCRIPTIONCHANGE', (32782): u'EVENT_OBJECT_VALUECHANGE',\n (32783): 
u'EVENT_OBJECT_PARENTCHANGE', (32784):\n u'EVENT_OBJECT_HELPCHANGE', (32785): u'EVENT_OBJECT_DEFACTIONCHANGE', (\n 32786): u'EVENT_OBJECT_ACCELERATORCHANGE'}\nwinEventIDsToEventNames = {}\nfor _sym, _val in locals().items():\n if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):\n winEventIDsToEventNames[_val] = _sym\n",
"step-4": "'''\nUseful constants.\n\nInspired by pyatspi:\nhttp://live.gnome.org/GAP/PythonATSPI\n\n@author: Eitan Isaacson\n@copyright: Copyright (c) 2008, Eitan Isaacson\n@license: LGPL\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Library General Public\nLicense as published by the Free Software Foundation; either\nversion 2 of the License, or (at your option) any later version.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLibrary General Public License for more details.\n\nYou should have received a copy of the GNU Library General Public\nLicense along with this library; if not, write to the\nFree Software Foundation, Inc., 59 Temple Place - Suite 330,\nBoston, MA 02111-1307, USA.\n'''\n# Child ID.\nCHILDID_SELF = 0\n\n# IAccessibleText Constants\nIA2_TEXT_OFFSET_LENGTH = -1\nIA2_TEXT_OFFSET_CARET = -2\n\n# Accessible Roles\n# TODO: Is there a way to retrieve this at runtime or build time?\n#\nROLE_SYSTEM_ALERT = 8\nROLE_SYSTEM_ANIMATION = 54\nROLE_SYSTEM_APPLICATION = 14\nROLE_SYSTEM_BORDER = 19\nROLE_SYSTEM_BUTTONDROPDOWN = 56\nROLE_SYSTEM_BUTTONDROPDOWNGRID = 58\nROLE_SYSTEM_BUTTONMENU = 57\nROLE_SYSTEM_CARET = 7\nROLE_SYSTEM_CELL = 29\nROLE_SYSTEM_CHARACTER = 32\nROLE_SYSTEM_CHART = 17\nROLE_SYSTEM_CHECKBUTTON = 44\nROLE_SYSTEM_CLIENT = 10\nROLE_SYSTEM_CLOCK = 61\nROLE_SYSTEM_COLUMN = 27\nROLE_SYSTEM_COLUMNHEADER = 25\nROLE_SYSTEM_COMBOBOX = 46\nROLE_SYSTEM_CURSOR = 6\nROLE_SYSTEM_DIAGRAM = 53\nROLE_SYSTEM_DIAL = 49\nROLE_SYSTEM_DIALOG = 18\nROLE_SYSTEM_DOCUMENT = 15\nROLE_SYSTEM_DROPLIST = 47\nROLE_SYSTEM_EQUATION = 55\nROLE_SYSTEM_GRAPHIC = 40\nROLE_SYSTEM_GRIP = 4\nROLE_SYSTEM_GROUPING = 20\nROLE_SYSTEM_HELPBALLOON = 31\nROLE_SYSTEM_HOTKEYFIELD = 50\nROLE_SYSTEM_INDICATOR = 39\nROLE_SYSTEM_LINK = 30\nROLE_SYSTEM_LIST = 33\nROLE_SYSTEM_LISTITEM = 
34\nROLE_SYSTEM_MENUBAR = 2\nROLE_SYSTEM_MENUITEM = 12\nROLE_SYSTEM_MENUPOPUP = 11\nROLE_SYSTEM_OUTLINE = 35\nROLE_SYSTEM_OUTLINEITEM = 36\nROLE_SYSTEM_PAGETAB = 37\nROLE_SYSTEM_PAGETABLIST = 60\nROLE_SYSTEM_PANE = 16\nROLE_SYSTEM_PROGRESSBAR = 48\nROLE_SYSTEM_PROPERTYPAGE = 38\nROLE_SYSTEM_PUSHBUTTON = 43\nROLE_SYSTEM_RADIOBUTTON = 45\nROLE_SYSTEM_ROW = 28\nROLE_SYSTEM_ROWHEADER = 26\nROLE_SYSTEM_SCROLLBAR = 3\nROLE_SYSTEM_SEPARATOR = 21\nROLE_SYSTEM_SLIDER = 51\nROLE_SYSTEM_SOUND = 5\nROLE_SYSTEM_SPINBUTTON = 52\nROLE_SYSTEM_STATICTEXT = 41\nROLE_SYSTEM_STATUSBAR = 23\nROLE_SYSTEM_TABLE = 24\nROLE_SYSTEM_TEXT = 42\nROLE_SYSTEM_TITLEBAR = 1\nROLE_SYSTEM_TOOLBAR = 22\nROLE_SYSTEM_TOOLTIP = 13\nROLE_SYSTEM_WHITESPACE = 59\nROLE_SYSTEM_WINDOW = 9\n\nIA2_ROLE_UNKNOWN = 0\nIA2_ROLE_CANVAS = 0x401\nIA2_ROLE_CAPTION = 0x402\nIA2_ROLE_CHECK_MENU_ITEM = 0x403\nIA2_ROLE_COLOR_CHOOSER = 0x404\nIA2_ROLE_DATE_EDITOR = 0x405\nIA2_ROLE_DESKTOP_ICON = 0x406\nIA2_ROLE_DESKTOP_PANE = 0x407\nIA2_ROLE_DIRECTORY_PANE = 0x408\nIA2_ROLE_EDITBAR = 0x409\nIA2_ROLE_EMBEDDED_OBJECT = 0x40a\nIA2_ROLE_ENDNOTE = 0x40b\nIA2_ROLE_FILE_CHOOSER = 0x40c\nIA2_ROLE_FONT_CHOOSER = 0x40d\nIA2_ROLE_FOOTER = 0x40e\nIA2_ROLE_FOOTNOTE = 0x40f\nIA2_ROLE_FORM = 0x410\nIA2_ROLE_FRAME = 0x411\nIA2_ROLE_GLASS_PANE = 0x412\nIA2_ROLE_HEADER = 0x413\nIA2_ROLE_HEADING = 0x414\nIA2_ROLE_ICON = 0x415\nIA2_ROLE_IMAGE_MAP = 0x416\nIA2_ROLE_INPUT_METHOD_WINDOW = 0x417\nIA2_ROLE_INTERNAL_FRAME = 0x418\nIA2_ROLE_LABEL = 0x419\nIA2_ROLE_LAYERED_PANE = 0x41a\nIA2_ROLE_NOTE = 0x41b\nIA2_ROLE_OPTION_PANE = 0x41c\nIA2_ROLE_PAGE = 0x41d\nIA2_ROLE_PARAGRAPH = 0x41e\nIA2_ROLE_RADIO_MENU_ITEM = 0x41f\nIA2_ROLE_REDUNDANT_OBJECT = 0x420\nIA2_ROLE_ROOT_PANE = 0x421\nIA2_ROLE_RULER = 0x422\nIA2_ROLE_SCROLL_PANE = 0x423\nIA2_ROLE_SECTION = 0x424\nIA2_ROLE_SHAPE = 0x425\nIA2_ROLE_SPLIT_PANE = 0x426\nIA2_ROLE_TEAR_OFF_MENU = 0x427\nIA2_ROLE_TERMINAL = 0x428\nIA2_ROLE_TEXT_FRAME = 0x429\nIA2_ROLE_TOGGLE_BUTTON = 0x42a\nIA2_ROLE_VIEW_PORT 
= 0x42b\nIA2_ROLE_COMPLEMENTARY_CONTENT = 0x42c\nIA2_ROLE_LANDMARK = 0x42d\n\n\n\n# Unlocalized role strings\nUNLOCALIZED_ROLE_NAMES = {\n 1: u'ROLE_SYSTEM_TITLEBAR',\n 2: u'ROLE_SYSTEM_MENUBAR',\n 3: u'ROLE_SYSTEM_SCROLLBAR',\n 4: u'ROLE_SYSTEM_GRIP',\n 5: u'ROLE_SYSTEM_SOUND',\n 6: u'ROLE_SYSTEM_CURSOR',\n 7: u'ROLE_SYSTEM_CARET',\n 8: u'ROLE_SYSTEM_ALERT',\n 9: u'ROLE_SYSTEM_WINDOW',\n 10: u'ROLE_SYSTEM_CLIENT',\n 11: u'ROLE_SYSTEM_MENUPOPUP',\n 12: u'ROLE_SYSTEM_MENUITEM',\n 13: u'ROLE_SYSTEM_TOOLTIP',\n 14: u'ROLE_SYSTEM_APPLICATION',\n 15: u'ROLE_SYSTEM_DOCUMENT',\n 16: u'ROLE_SYSTEM_PANE',\n 17: u'ROLE_SYSTEM_CHART',\n 18: u'ROLE_SYSTEM_DIALOG',\n 19: u'ROLE_SYSTEM_BORDER',\n 20: u'ROLE_SYSTEM_GROUPING',\n 21: u'ROLE_SYSTEM_SEPARATOR',\n 22: u'ROLE_SYSTEM_TOOLBAR',\n 23: u'ROLE_SYSTEM_STATUSBAR',\n 24: u'ROLE_SYSTEM_TABLE',\n 25: u'ROLE_SYSTEM_COLUMNHEADER',\n 26: u'ROLE_SYSTEM_ROWHEADER',\n 27: u'ROLE_SYSTEM_COLUMN',\n 28: u'ROLE_SYSTEM_ROW',\n 29: u'ROLE_SYSTEM_CELL',\n 30: u'ROLE_SYSTEM_LINK',\n 31: u'ROLE_SYSTEM_HELPBALLOON',\n 32: u'ROLE_SYSTEM_CHARACTER',\n 33: u'ROLE_SYSTEM_LIST',\n 34: u'ROLE_SYSTEM_LISTITEM',\n 35: u'ROLE_SYSTEM_OUTLINE',\n 36: u'ROLE_SYSTEM_OUTLINEITEM',\n 37: u'ROLE_SYSTEM_PAGETAB',\n 38: u'ROLE_SYSTEM_PROPERTYPAGE',\n 39: u'ROLE_SYSTEM_INDICATOR',\n 40: u'ROLE_SYSTEM_GRAPHIC',\n 41: u'ROLE_SYSTEM_STATICTEXT',\n 42: u'ROLE_SYSTEM_TEXT',\n 43: u'ROLE_SYSTEM_PUSHBUTTON',\n 44: u'ROLE_SYSTEM_CHECKBUTTON',\n 45: u'ROLE_SYSTEM_RADIOBUTTON',\n 46: u'ROLE_SYSTEM_COMBOBOX',\n 47: u'ROLE_SYSTEM_DROPLIST',\n 48: u'ROLE_SYSTEM_PROGRESSBAR',\n 49: u'ROLE_SYSTEM_DIAL',\n 50: u'ROLE_SYSTEM_HOTKEYFIELD',\n 51: u'ROLE_SYSTEM_SLIDER',\n 52: u'ROLE_SYSTEM_SPINBUTTON',\n 53: u'ROLE_SYSTEM_DIAGRAM',\n 54: u'ROLE_SYSTEM_ANIMATION',\n 55: u'ROLE_SYSTEM_EQUATION',\n 56: u'ROLE_SYSTEM_BUTTONDROPDOWN',\n 57: u'ROLE_SYSTEM_BUTTONMENU',\n 58: u'ROLE_SYSTEM_BUTTONDROPDOWNGRID',\n 59: u'ROLE_SYSTEM_WHITESPACE',\n 60: u'ROLE_SYSTEM_PAGETABLIST',\n 61: 
u'ROLE_SYSTEM_CLOCK'}\n\n# Unlocalized role strings\nUNLOCALIZED_IA2_ROLE_NAMES = {\n 0x000: u'IA2_ROLE_UNKNOWN',\n 0x401: u'IA2_ROLE_CANVAS',\n 0x402: u'IA2_ROLE_CAPTION',\n 0x403: u'IA2_ROLE_CHECK_MENU_ITEM',\n 0x404: u'IA2_ROLE_COLOR_CHOOSER',\n 0x405: u'IA2_ROLE_DATE_EDITOR',\n 0x406: u'IA2_ROLE_DESKTOP_ICON',\n 0x407: u'IA2_ROLE_DESKTOP_PANE',\n 0x408: u'IA2_ROLE_DIRECTORY_PANE',\n 0x409: u'IA2_ROLE_EDITBAR',\n 0x40a: u'IA2_ROLE_EMBEDDED_OBJECT',\n 0x40b: u'IA2_ROLE_ENDNOTE',\n 0x40c: u'IA2_ROLE_FILE_CHOOSER',\n 0x40d: u'IA2_ROLE_FONT_CHOOSER',\n 0x40e: u'IA2_ROLE_FOOTER',\n 0x40f: u'IA2_ROLE_FOOTNOTE',\n 0x410: u'IA2_ROLE_FORM',\n 0x411: u'IA2_ROLE_FRAME',\n 0x412: u'IA2_ROLE_GLASS_PANE',\n 0x413: u'IA2_ROLE_HEADER',\n 0x414: u'IA2_ROLE_HEADING',\n 0x415: u'IA2_ROLE_ICON',\n 0x416: u'IA2_ROLE_IMAGE_MAP',\n 0x417: u'IA2_ROLE_INPUT_METHOD_WINDOW',\n 0x418: u'IA2_ROLE_INTERNAL_FRAME',\n 0x419: u'IA2_ROLE_LABEL',\n 0x41a: u'IA2_ROLE_LAYERED_PANE',\n 0x41b: u'IA2_ROLE_NOTE',\n 0x41c: u'IA2_ROLE_OPTION_PANE',\n 0x41d: u'IA2_ROLE_PAGE',\n 0x41e: u'IA2_ROLE_PARAGRAPH',\n 0x41f: u'IA2_ROLE_RADIO_MENU_ITEM',\n 0x420: u'IA2_ROLE_REDUNDANT_OBJECT',\n 0x421: u'IA2_ROLE_ROOT_PANE',\n 0x422: u'IA2_ROLE_RULER',\n 0x423: u'IA2_ROLE_SCROLL_PANE',\n 0x424: u'IA2_ROLE_SECTION',\n 0x425: u'IA2_ROLE_SHAPE',\n 0x426: u'IA2_ROLE_SPLIT_PANE',\n 0x427: u'IA2_ROLE_TEAR_OFF_MENU',\n 0x428: u'IA2_ROLE_TERMINAL',\n 0x429: u'IA2_ROLE_TEXT_FRAME',\n 0x42a: u'IA2_ROLE_TOGGLE_BUTTON',\n 0x42b: u'IA2_ROLE_VIEW_PORT',\n 0x42c: u'IA2_ROLE_COMPLEMENTARY_CONTENT',\n 0x42d: u'IA2_ROLE_LANDMARK'}\n\n# Navigation constants\nNAVDIR_DOWN = 2\nNAVDIR_FIRSTCHILD = 7\nNAVDIR_LASTCHILD = 8\nNAVDIR_LEFT = 3\nNAVDIR_NEXT = 5\nNAVDIR_PREVIOUS = 6\nNAVDIR_RIGHT = 4\nNAVDIR_UP = 1\n\nSTATE_SYSTEM_UNAVAILABLE = 0x1\nSTATE_SYSTEM_SELECTED = 0x2\nSTATE_SYSTEM_FOCUSED = 0x4\nSTATE_SYSTEM_PRESSED = 0x8\nSTATE_SYSTEM_CHECKED = 0x10\nSTATE_SYSTEM_MIXED = 0x20\nSTATE_SYSTEM_READONLY = 0x40\nSTATE_SYSTEM_HOTTRACKED = 
0x80\nSTATE_SYSTEM_DEFAULT = 0x100\nSTATE_SYSTEM_EXPANDED = 0x200\nSTATE_SYSTEM_COLLAPSED = 0x400\nSTATE_SYSTEM_BUSY = 0x800\nSTATE_SYSTEM_FLOATING = 0x1000\nSTATE_SYSTEM_MARQUEED = 0x2000\nSTATE_SYSTEM_ANIMATED = 0x4000\nSTATE_SYSTEM_INVISIBLE = 0x8000\nSTATE_SYSTEM_OFFSCREEN = 0x10000\nSTATE_SYSTEM_SIZEABLE = 0x20000\nSTATE_SYSTEM_MOVEABLE = 0x40000\nSTATE_SYSTEM_SELFVOICING = 0x80000\nSTATE_SYSTEM_FOCUSABLE = 0x100000\nSTATE_SYSTEM_SELECTABLE = 0x200000\nSTATE_SYSTEM_LINKED = 0x400000\nSTATE_SYSTEM_TRAVERSED = 0x800000\nSTATE_SYSTEM_MULTISELECTABLE = 0x1000000\nSTATE_SYSTEM_EXTSELECTABLE = 0x2000000\nSTATE_SYSTEM_HASSUBMENU = 0x4000000\nSTATE_SYSTEM_ALERT_LOW = 0x4000000\nSTATE_SYSTEM_ALERT_MEDIUM = 0x8000000\nSTATE_SYSTEM_ALERT_HIGH = 0x10000000\nSTATE_SYSTEM_PROTECTED = 0x20000000\nSTATE_SYSTEM_HASPOPUP = 0x40000000\nSTATE_SYSTEM_VALID = 0x1fffffff\n\n\n# Unlocalized state strings\nUNLOCALIZED_STATE_NAMES = {\n 1: u'STATE_SYSTEM_UNAVAILABLE',\n 2: u'STATE_SYSTEM_SELECTED',\n 4: u'STATE_SYSTEM_FOCUSED',\n 8: u'STATE_SYSTEM_PRESSED',\n 16: u'STATE_SYSTEM_CHECKED',\n 32: u'STATE_SYSTEM_MIXED',\n 64: u'STATE_SYSTEM_READONLY',\n 128: u'STATE_SYSTEM_HOTTRACKED',\n 256: u'STATE_SYSTEM_DEFAULT',\n 512: u'STATE_SYSTEM_EXPANDED',\n 1024: u'STATE_SYSTEM_COLLAPSED',\n 2048: u'STATE_SYSTEM_BUSY',\n 4096: u'STATE_SYSTEM_FLOATING',\n 8192: u'STATE_SYSTEM_MARQUEED',\n 16384: u'STATE_SYSTEM_ANIMATED',\n 32768: u'STATE_SYSTEM_INVISIBLE',\n 65536: u'STATE_SYSTEM_OFFSCREEN',\n 131072: u'STATE_SYSTEM_SIZEABLE',\n 262144: u'STATE_SYSTEM_MOVEABLE',\n 524288: u'STATE_SYSTEM_SELFVOICING',\n 1048576: u'STATE_SYSTEM_FOCUSABLE',\n 2097152: u'STATE_SYSTEM_SELECTABLE',\n 4194304: u'STATE_SYSTEM_LINKED',\n 8388608: u'STATE_SYSTEM_TRAVERSED',\n 16777216: u'STATE_SYSTEM_MULTISELECTABLE',\n 33554432: u'STATE_SYSTEM_EXTSELECTABLE',\n 67108864: u'STATE_SYSTEM_ALERT_LOW',\n 134217728: u'STATE_SYSTEM_ALERT_MEDIUM',\n 268435456: u'STATE_SYSTEM_ALERT_HIGH',\n 536870912: u'STATE_SYSTEM_PROTECTED',\n 
1073741824: u'STATE_SYSTEM_HASPOPUP',\n 0x1fffffff: u'STATE_SYSTEM_VALID'}\n\nIA2_STATE_ACTIVE = 0x1\nIA2_STATE_ARMED = 0x2\nIA2_STATE_DEFUNCT = 0x4\nIA2_STATE_EDITABLE = 0x8\nIA2_STATE_HORIZONTAL = 0x10\nIA2_STATE_ICONIFIED = 0x20\nIA2_STATE_INVALID_ENTRY = 0x40\nIA2_STATE_MANAGES_DESCENDANTS = 0x80\nIA2_STATE_MODAL = 0x100\nIA2_STATE_MULTI_LINE = 0x200\nIA2_STATE_OPAQUE = 0x400\nIA2_STATE_REQUIRED = 0x800\nIA2_STATE_SELECTABLE_TEXT = 0x1000\nIA2_STATE_SINGLE_LINE = 0x2000\nIA2_STATE_STALE = 0x4000\nIA2_STATE_SUPPORTS_AUTOCOMPLETION = 0x8000\nIA2_STATE_TRANSIENT = 0x10000\nIA2_STATE_VERTICAL = 0x20000\nIA2_STATE_CHECKABLE = 0x40000\nIA2_STATE_PINNED = 0x80000\n\nUNLOCALIZED_IA2_STATE_NAMES = {\n 1: u'IA2_STATE_ACTIVE',\n 2: u'IA2_STATE_ARMED',\n 4: u'IA2_STATE_DEFUNCT',\n 8: u'IA2_STATE_EDITABLE',\n 16: u'IA2_STATE_HORIZONTAL',\n 32: u'IA2_STATE_ICONIFIED',\n 64: u'IA2_STATE_INVALID_ENTRY',\n 128: u'IA2_STATE_MANAGES_DESCENDANTS',\n 256: u'IA2_STATE_MODAL',\n 512: u'IA2_STATE_MULTI_LINE',\n 1024: u'IA2_STATE_OPAQUE',\n 2048: u'IA2_STATE_REQUIRED',\n 4096: u'IA2_STATE_SELECTABLE_TEXT',\n 8192: u'IA2_STATE_SINGLE_LINE',\n 16384: u'IA2_STATE_STALE',\n 32768: u'IA2_STATE_SUPPORTS_AUTOCOMPLETION',\n 65536: u'IA2_STATE_TRANSIENT',\n 131072: u'IA2_STATE_VERTICAL',\n 262144: u'IA2_STATE_CHECKABLE',\n 524288: u'IA2_STATE_PINNED'}\n\nUNLOCALIZED_IA2_RELATION_TYPES = {\n u'containingApplication' : u'IA2_RELATION_CONTAINING_APPLICATION',\n u'containingDocument' : u'IA2_RELATION_CONTAINING_DOCUMENT',\n u'containingTabPane' : u'IA2_RELATION_CONTAINING_TAB_PANE',\n u'containingWindow' : u'IA2_RELATION_CONTAINING_WINDOW',\n u'controlledBy' : u'IA2_RELATION_CONTROLLED_BY',\n u'controllerFor' : u'IA2_RELATION_CONTROLLER_FOR',\n u'describedBy' : u'IA2_RELATION_DESCRIBED_BY',\n u'descriptionFor' : u'IA2_RELATION_DESCRIPTION_FOR',\n u'details' : u'IA2_RELATION_DETAILS',\n u'detailsFor' : u'IA2_RELATION_DETAILS_FOR',\n u'embeddedBy' : u'IA2_RELATION_EMBEDDED_BY',\n u'embeds' : 
u'IA2_RELATION_EMBEDS',\n u'errorMessage' : u'IA2_RELATION_ERROR_MESSAGE',\n u'errorFor' : u'IA2_RELATION_ERROR_FOR',\n u'flowsFrom' : u'IA2_RELATION_FLOWS_FROM',\n u'flowsTo' : u'IA2_RELATION_FLOWS_TO',\n u'labelFor' : u'IA2_RELATION_LABEL_FOR',\n u'labelledBy' : u'IA2_RELATION_LABELED_BY',\n u'labelledBy' : u'IA2_RELATION_LABELLED_BY',\n u'memberOf' : u'IA2_RELATION_MEMBER_OF',\n u'nextTabbable' : u'IA2_RELATION_NEXT_TABBABLE',\n u'nodeChildOf' : u'IA2_RELATION_NODE_CHILD_OF',\n u'nodeParentOf' : u'IA2_RELATION_NODE_PARENT_OF',\n u'parentWindowOf' : u'IA2_RELATION_PARENT_WINDOW_OF',\n u'popupFor' : u'IA2_RELATION_POPUP_FOR',\n u'previousTabbable' : u'IA2_RELATION_PREVIOUS_TABBABLE',\n u'subwindowOf' : u'IA2_RELATION_SUBWINDOW_OF'}\n\n\n# SetWinEventHook() flags\nWINEVENT_OUTOFCONTEXT = 0x0\nWINEVENT_SKIPOWNTHREAD =0x1\nWINEVENT_SKIPOWNPROCESS = 0x2\nWINEVENT_INCONTEXT = 0x4\n\n#win events\nEVENT_SYSTEM_SOUND = 0x1\nEVENT_SYSTEM_ALERT = 0x2\nEVENT_SYSTEM_FOREGROUND = 0x3\nEVENT_SYSTEM_MENUSTART = 0x4\nEVENT_SYSTEM_MENUEND = 0x5\nEVENT_SYSTEM_MENUPOPUPSTART = 0x6\nEVENT_SYSTEM_MENUPOPUPEND = 0x7\nEVENT_SYSTEM_CAPTURESTART = 0x8\nEVENT_SYSTEM_CAPTUREEND = 0x9\nEVENT_SYSTEM_MOVESIZESTART = 0xa\nEVENT_SYSTEM_MOVESIZEEND = 0xb\nEVENT_SYSTEM_CONTEXTHELPSTART = 0xc\nEVENT_SYSTEM_CONTEXTHELPEND = 0xd\nEVENT_SYSTEM_DRAGDROPSTART = 0xe\nEVENT_SYSTEM_DRAGDROPEND = 0xf\nEVENT_SYSTEM_DIALOGSTART = 0x10\nEVENT_SYSTEM_DIALOGEND = 0x11\nEVENT_SYSTEM_SCROLLINGSTART = 0x12\nEVENT_SYSTEM_SCROLLINGEND = 0x13\nEVENT_SYSTEM_SWITCHSTART = 0x14\nEVENT_SYSTEM_SWITCHEND = 0x15\nEVENT_SYSTEM_MINIMIZESTART = 0x16\nEVENT_SYSTEM_MINIMIZEEND = 0x17\nEVENT_OBJECT_CREATE = 0x8000\nEVENT_OBJECT_DESTROY = 0x8001\nEVENT_OBJECT_SHOW = 0x8002\nEVENT_OBJECT_HIDE = 0x8003\nEVENT_OBJECT_REORDER = 0x8004\nEVENT_OBJECT_FOCUS = 0x8005\nEVENT_OBJECT_SELECTION = 0x8006\nEVENT_OBJECT_SELECTIONADD = 0x8007\nEVENT_OBJECT_SELECTIONREMOVE = 0x8008\nEVENT_OBJECT_SELECTIONWITHIN = 0x8009\nEVENT_OBJECT_STATECHANGE = 
0x800a\nEVENT_OBJECT_LOCATIONCHANGE = 0x800b\nEVENT_OBJECT_NAMECHANGE = 0x800c\nEVENT_OBJECT_DESCRIPTIONCHANGE = 0x800d\nEVENT_OBJECT_VALUECHANGE = 0x800e\nEVENT_OBJECT_PARENTCHANGE = 0x800f\nEVENT_OBJECT_HELPCHANGE = 0x8010\nEVENT_OBJECT_DEFACTIONCHANGE = 0x8011\nEVENT_OBJECT_ACCELERATORCHANGE = 0x8012\nEVENT_CONSOLE_CARET = 0x4001\nEVENT_CONSOLE_UPDATE_REGION = 0x4002\nEVENT_CONSOLE_UPDATE_SIMPLE = 0x4003\nEVENT_CONSOLE_UPDATE_SCROLL = 0x4004\nEVENT_CONSOLE_LAYOUT = 0x4005\nEVENT_CONSOLE_START_APPLICATION = 0x4006\nEVENT_CONSOLE_END_APPLICATION = 0x4007\n\n# IAccessible2 events\nIA2_EVENT_ACTION_CHANGED = 0x101\nIA2_EVENT_ACTIVE_DECENDENT_CHANGED = 0x102\nIA2_EVENT_ACTIVE_DESCENDANT_CHANGED = 0x102\nIA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED = 0x103\nIA2_EVENT_DOCUMENT_CONTENT_CHANGED = 0x104\nIA2_EVENT_DOCUMENT_LOAD_COMPLETE = 0x105\nIA2_EVENT_DOCUMENT_LOAD_STOPPED = 0x106\nIA2_EVENT_DOCUMENT_RELOAD = 0x107\nIA2_EVENT_HYPERLINK_END_INDEX_CHANGED = 0x108\nIA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED = 0x109\nIA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED = 0x10a\nIA2_EVENT_HYPERTEXT_LINK_ACTIVATED = 0x10b\nIA2_EVENT_HYPERTEXT_LINK_SELECTED = 0x10c\nIA2_EVENT_HYPERLINK_START_INDEX_CHANGED = 0x10d\nIA2_EVENT_HYPERTEXT_CHANGED = 0x10e\nIA2_EVENT_HYPERTEXT_NLINKS_CHANGED = 0x11f\nIA2_EVENT_OBJECT_ATTRIBUTE_CHANGED = 0x120\nIA2_EVENT_PAGE_CHANGED = 0x111\nIA2_EVENT_SECTION_CHANGED = 0x112\nIA2_EVENT_TABLE_CAPTION_CHANGED = 0x113\nIA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED = 0x114\nIA2_EVENT_TABLE_COLUMN_HEADER_CHANGED = 0x115\nIA2_EVENT_TABLE_MODEL_CHANGED = 0x116\nIA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED = 0x117\nIA2_EVENT_TABLE_ROW_HEADER_CHANGED = 0x118\nIA2_EVENT_TABLE_SUMMARY_CHANGED = 0x119\nIA2_EVENT_TEXT_ATTRIBUTE_CHANGED = 0x11a\nIA2_EVENT_TEXT_CARET_MOVED = 0x11b\nIA2_EVENT_TEXT_CHANGED = 0x11c\nIA2_EVENT_TEXT_COLUMN_CHANGED = 0x11d\nIA2_EVENT_TEXT_INSERTED = 0x11e\nIA2_EVENT_TEXT_REMOVED = 0x11f\nIA2_EVENT_TEXT_UPDATED = 0x120\nIA2_EVENT_TEXT_SELECTION_CHANGED = 
0x121\nIA2_EVENT_VISIBLE_DATA_CHANGED = 0x122\n\nUNLOCALIZED_EVENT_NAMES = {\n\n 0x1: u'EVENT_SYSTEM_SOUND',\n 0x2: u'EVENT_SYSTEM_ALERT',\n 0x3: u'EVENT_SYSTEM_FOREGROUND',\n 0x4: u'EVENT_SYSTEM_MENUSTART',\n 0x5: u'EVENT_SYSTEM_MENUEND',\n 0x6: u'EVENT_SYSTEM_MENUPOPUPSTART',\n 0x7: u'EVENT_SYSTEM_MENUPOPUPEND',\n 0x8: u'EVENT_SYSTEM_CAPTURESTART',\n 0x9: u'EVENT_SYSTEM_CAPTUREEND',\n 0xa: u'EVENT_SYSTEM_MOVESIZESTART',\n 0xb: u'EVENT_SYSTEM_MOVESIZEEND',\n 0xc: u'EVENT_SYSTEM_CONTEXTHELPSTART',\n 0xd: u'EVENT_SYSTEM_CONTEXTHELPEND',\n 0xe: u'EVENT_SYSTEM_DRAGDROPSTART',\n 0xf: u'EVENT_SYSTEM_DRAGDROPEND',\n 0x10: u'EVENT_SYSTEM_DIALOGSTART',\n 0x11: u'EVENT_SYSTEM_DIALOGEND',\n 0x12: u'EVENT_SYSTEM_SCROLLINGSTART',\n 0x13: u'EVENT_SYSTEM_SCROLLINGEND',\n 0x14: u'EVENT_SYSTEM_SWITCHSTART',\n 0x15: u'EVENT_SYSTEM_SWITCHEND',\n 0x16: u'EVENT_SYSTEM_MINIMIZESTART',\n 0x17: u'EVENT_SYSTEM_MINIMIZEEND',\n\n 0x101: u'IA2_EVENT_ACTION_CHANGED',\n 0x102: u'IA2_EVENT_ACTIVE_DESCENDANT_CHANGED',\n 0x103: u'IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED',\n 0x104: u'IA2_EVENT_DOCUMENT_CONTENT_CHANGED',\n 0x105: u'IA2_EVENT_DOCUMENT_LOAD_COMPLETE',\n 0x106: u'IA2_EVENT_DOCUMENT_LOAD_STOPPED',\n 0x107: u'IA2_EVENT_DOCUMENT_RELOAD',\n 0x108: u'IA2_EVENT_HYPERLINK_END_INDEX_CHANGED',\n 0x109: u'IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED',\n 0x10a: u'IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED',\n 0x10b: u'IA2_EVENT_HYPERTEXT_LINK_ACTIVATED',\n 0x10c: u'IA2_EVENT_HYPERTEXT_LINK_SELECTED',\n 0x10d: u'IA2_EVENT_HYPERLINK_START_INDEX_CHANGED',\n 0x10e: u'IA2_EVENT_HYPERTEXT_CHANGED',\n 0x10f: u'IA2_EVENT_HYPERTEXT_NLINKS_CHANGED',\n 0x110: u'IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED',\n 0x111: u'IA2_EVENT_PAGE_CHANGED',\n 0x112: u'IA2_EVENT_SECTION_CHANGED',\n 0x113: u'IA2_EVENT_TABLE_CAPTION_CHANGED',\n 0x114: u'IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED',\n 0x115: u'IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED',\n 0x116: u'IA2_EVENT_TABLE_MODEL_CHANGED',\n 0x117: 
u'IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED',\n 0x118: u'IA2_EVENT_TABLE_ROW_HEADER_CHANGED',\n 0x119: u'IA2_EVENT_TABLE_SUMMARY_CHANGED',\n 0x11a: u'IA2_EVENT_TEXT_ATTRIBUTE_CHANGED',\n 0x11b: u'IA2_EVENT_TEXT_CARET_MOVED',\n 0x11c: u'IA2_EVENT_TEXT_CHANGED',\n 0x11d: u'IA2_EVENT_TEXT_COLUMN_CHANGED',\n 0x11e: u'IA2_EVENT_TEXT_INSERTED',\n 0x11f: u'IA2_EVENT_TEXT_REMOVED',\n 0x120: u'IA2_EVENT_TEXT_UPDATED',\n 0x121: u'IA2_EVENT_TEXT_SELECTION_CHANGED',\n 0x122: u'IA2_EVENT_VISIBLE_DATA_CHANGED',\n\n 0x4001: u'EVENT_CONSOLE_CARET',\n 0x4002: u'EVENT_CONSOLE_UPDATE_REGION',\n 0x4003: u'EVENT_CONSOLE_UPDATE_SIMPLE',\n 0x4004: u'EVENT_CONSOLE_UPDATE_SCROLL',\n 0x4005: u'EVENT_CONSOLE_LAYOUT',\n 0x4006: u'EVENT_CONSOLE_START_APPLICATION',\n 0x4007: u'EVENT_CONSOLE_END_APPLICATION',\n\n 0x8000: u'EVENT_OBJECT_CREATE',\n 0x8001: u'EVENT_OBJECT_DESTROY',\n 0x8002: u'EVENT_OBJECT_SHOW',\n 0x8003: u'EVENT_OBJECT_HIDE',\n 0x8004: u'EVENT_OBJECT_REORDER',\n 0x8005: u'EVENT_OBJECT_FOCUS',\n 0x8006: u'EVENT_OBJECT_SELECTION',\n 0x8007: u'EVENT_OBJECT_SELECTIONADD',\n 0x8008: u'EVENT_OBJECT_SELECTIONREMOVE',\n 0x8009: u'EVENT_OBJECT_SELECTIONWITHIN',\n 0x800a: u'EVENT_OBJECT_STATECHANGE',\n 0x800b: u'EVENT_OBJECT_LOCATIONCHANGE',\n 0x800c: u'EVENT_OBJECT_NAMECHANGE',\n 0x800d: u'EVENT_OBJECT_DESCRIPTIONCHANGE',\n 0x800e: u'EVENT_OBJECT_VALUECHANGE',\n 0x800f: u'EVENT_OBJECT_PARENTCHANGE',\n 0x8010: u'EVENT_OBJECT_HELPCHANGE',\n 0x8011: u'EVENT_OBJECT_DEFACTIONCHANGE',\n 0x8012: u'EVENT_OBJECT_ACCELERATORCHANGE'}\n\n\nwinEventIDsToEventNames={}\n\nfor _sym, _val in locals().items():\n if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):\n winEventIDsToEventNames[_val] = _sym\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def search(url):
    """Open *url* in Chrome, scroll to trigger lazy-loading of image
    results, and return the fully-loaded page HTML.

    NOTE(review): the chromedriver path is hard-coded to a local
    Windows install -- confirm before running elsewhere.
    """
    driver = webdriver.Chrome(executable_path=
        'C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe')
    driver.get(url)
    time.sleep(1)
    body = driver.find_element_by_tag_name('body')
    # First round of scrolling so more thumbnails load.
    for _ in range(30):
        body.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    # Click the "Show more results" button, then keep scrolling.
    driver.find_element_by_id('smb').click()
    for _ in range(50):
        body.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    time.sleep(1)
    html = driver.page_source
    driver.close()
    return html
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def search(url):
    """Load *url* in a Chrome browser, repeatedly scroll the page so
    Google Images lazy-loads more results, and return the page source.

    NOTE(review): the chromedriver path is hard-coded to a local
    Windows install -- confirm before running elsewhere.
    """
    browser = webdriver.Chrome(executable_path=
        'C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe')
    browser.get(url)
    time.sleep(1)
    element = browser.find_element_by_tag_name('body')
    # Scroll down repeatedly so more image thumbnails are loaded.
    for i in range(30):
        element.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    # 'smb' is presumably the "Show more results" button on the page.
    browser.find_element_by_id('smb').click()
    for i in range(50):
        element.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    time.sleep(1)
    source = browser.page_source
    browser.close()
    return source
def download_image(link):
    """Resolve a Google Images result anchor and download the image.

    *link* is a BeautifulSoup ``<a>`` tag whose href points at a Google
    redirect page; that page's ``<title>`` ends with the direct image
    URL.  The image is saved into the current working directory.
    Failures are reported or skipped so one bad link does not abort
    the whole worker pool.
    """
    headers = {'User-Agent': ua.random}
    try:
        r = requests.get('https://www.google.com' + link.get('href'),
            headers=headers)
    except Exception:
        print('Cannot get link.')
        # Bug fix: without returning here, `r` would be unbound below
        # and the next line would raise NameError.
        return
    # The redirect page title ends with the direct image URL.
    title = str(fromstring(r.content).findtext('.//title'))
    link = title.split(' ')[-1]
    print('At : ' + os.getcwd() + ', Downloading from ' + link)
    try:
        # Bug fix: the original compared against ('jpg' or 'png' or
        # 'jpeg'), which evaluates to just 'jpg' and silently skipped
        # png/jpeg files; use a membership test instead.
        if link.split('.')[-1] in ('jpg', 'png', 'jpeg'):
            urllib.request.urlretrieve(link, link.split('/')[-1])
    except Exception:
        # Best-effort download: skip images that fail to retrieve.
        pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def search(url):
    """Load *url* in a Chrome browser, repeatedly scroll the page so
    Google Images lazy-loads more results, and return the page source.

    NOTE(review): the chromedriver path is hard-coded to a local
    Windows install -- confirm before running elsewhere.
    """
    browser = webdriver.Chrome(executable_path=
        'C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe')
    browser.get(url)
    time.sleep(1)
    element = browser.find_element_by_tag_name('body')
    # Scroll down repeatedly so more image thumbnails are loaded.
    for i in range(30):
        element.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    # 'smb' is presumably the "Show more results" button on the page.
    browser.find_element_by_id('smb').click()
    for i in range(50):
        element.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    time.sleep(1)
    source = browser.page_source
    browser.close()
    return source
def download_image(link):
    """Resolve a Google Images result anchor to the real image URL and save it.

    *link* is a BeautifulSoup ``<a>`` tag from the results page.  The image
    is written to the current working directory under its URL basename.
    Failures are logged/skipped so one bad link does not kill the worker.
    """
    # Random User-Agent per request to reduce the chance of being blocked.
    headers = {'User-Agent': ua.random}
    href = link.get('href')
    if not href:
        # Anchor without a target — nothing to download.
        return
    try:
        r = requests.get('https://www.google.com' + href, headers=headers)
    except requests.RequestException:
        # BUG FIX: the original fell through with ``r`` unbound and raised
        # NameError on the next line; bail out of this link instead.
        print('Cannot get link.')
        return
    # The interstitial page's <title> ends with the actual image URL.
    title = str(fromstring(r.content).findtext('.//title'))
    link = title.split(' ')[-1]
    print('At : ' + os.getcwd() + ', Downloading from ' + link)
    try:
        # BUG FIX: ``('jpg' or 'png' or 'jpeg')`` evaluates to just 'jpg',
        # so png/jpeg files were silently skipped; use a membership test.
        if link.split('.')[-1] in ('jpg', 'png', 'jpeg'):
            urllib.request.urlretrieve(link, link.split('/')[-1])
    except (OSError, ValueError):
        # Best-effort: skip images that cannot be retrieved or written.
        pass
if __name__ == '__main__':
    # Command line: a single positional keyword to search for.
    parser = argparse.ArgumentParser()
    parser.add_argument('keyword', help='the keyword to search')
    args = parser.parse_args()
    # NOTE(review): huge recursion limit — presumably for BeautifulSoup's
    # deep parse trees; confirm it is still needed.
    sys.setrecursionlimit(100000000)
    query = args.keyword
    # Google Images advanced-search URL; tbs= filters: larger than SVGA,
    # photo type, jpg format (taken from the query string itself).
    url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +
        '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'
        )
    source = search(url)
    soup = BeautifulSoup(str(source), 'html.parser')
    # Module-level UserAgent read by download_image in the workers.
    # NOTE(review): under spawn-based multiprocessing (Windows) this global
    # is not inherited by worker processes — verify download_image sees it.
    ua = UserAgent()
    # Download into a directory named after the keyword, creating it if needed.
    if not os.path.isdir(args.keyword):
        os.makedirs(args.keyword)
    os.chdir(str(os.getcwd()) + '/' + str(args.keyword))
    # Each result anchor carries class 'rg_l' in the (old) results markup.
    links = soup.find_all('a', class_='rg_l')
    # Fan the downloads out over a process pool.
    with Pool() as pool:
        pool.map(download_image, links)
<|reserved_special_token_1|>
import requests
import time
import urllib
import argparse
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from fake_useragent import UserAgent
from multiprocessing import Pool
from lxml.html import fromstring
import os, sys
def search(url):
    """Load *url* in Chrome, scroll to trigger lazy loading, return page HTML."""
    # NOTE(review): chromedriver path is machine-specific — confirm before use.
    driver = webdriver.Chrome(executable_path=
        'C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe')
    driver.get(url)
    time.sleep(1)
    body = driver.find_element_by_tag_name('body')
    # First scrolling pass to load the initial batch of results.
    for _ in range(30):
        body.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    # Element id 'smb' — presumably the "show more results" control; verify.
    driver.find_element_by_id('smb').click()
    # Second, longer scrolling pass for the additional results.
    for _ in range(50):
        body.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    time.sleep(1)
    page_html = driver.page_source
    driver.close()
    return page_html
def download_image(link):
    """Resolve a Google Images result anchor to the real image URL and save it.

    *link* is a BeautifulSoup ``<a>`` tag from the results page.  The image
    is written to the current working directory under its URL basename.
    Failures are logged/skipped so one bad link does not kill the worker.
    """
    # Random User-Agent per request to reduce the chance of being blocked.
    headers = {'User-Agent': ua.random}
    href = link.get('href')
    if not href:
        # Anchor without a target — nothing to download.
        return
    try:
        r = requests.get('https://www.google.com' + href, headers=headers)
    except requests.RequestException:
        # BUG FIX: the original fell through with ``r`` unbound and raised
        # NameError on the next line; bail out of this link instead.
        print('Cannot get link.')
        return
    # The interstitial page's <title> ends with the actual image URL.
    title = str(fromstring(r.content).findtext('.//title'))
    link = title.split(' ')[-1]
    print('At : ' + os.getcwd() + ', Downloading from ' + link)
    try:
        # BUG FIX: ``('jpg' or 'png' or 'jpeg')`` evaluates to just 'jpg',
        # so png/jpeg files were silently skipped; use a membership test.
        if link.split('.')[-1] in ('jpg', 'png', 'jpeg'):
            urllib.request.urlretrieve(link, link.split('/')[-1])
    except (OSError, ValueError):
        # Best-effort: skip images that cannot be retrieved or written.
        pass
if __name__ == '__main__':
    # Command line: a single positional keyword to search for.
    parser = argparse.ArgumentParser()
    parser.add_argument('keyword', help='the keyword to search')
    args = parser.parse_args()
    # NOTE(review): huge recursion limit — presumably for BeautifulSoup's
    # deep parse trees; confirm it is still needed.
    sys.setrecursionlimit(100000000)
    query = args.keyword
    # Google Images advanced-search URL; tbs= filters: larger than SVGA,
    # photo type, jpg format (taken from the query string itself).
    url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +
        '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'
        )
    source = search(url)
    soup = BeautifulSoup(str(source), 'html.parser')
    # Module-level UserAgent read by download_image in the workers.
    # NOTE(review): under spawn-based multiprocessing (Windows) this global
    # is not inherited by worker processes — verify download_image sees it.
    ua = UserAgent()
    # Download into a directory named after the keyword, creating it if needed.
    if not os.path.isdir(args.keyword):
        os.makedirs(args.keyword)
    os.chdir(str(os.getcwd()) + '/' + str(args.keyword))
    # Each result anchor carries class 'rg_l' in the (old) results markup.
    links = soup.find_all('a', class_='rg_l')
    # Fan the downloads out over a process pool.
    with Pool() as pool:
        pool.map(download_image, links)
<|reserved_special_token_1|>
import requests
import time
import urllib
import argparse
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from fake_useragent import UserAgent
from multiprocessing import Pool
from lxml.html import fromstring
import os, sys
#text = 'chowchowbaby'
#url='https://www.google.co.kr/search?q=' + text + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'
def search(url):
    """Load *url* in Chrome, scroll to trigger lazy loading, return page HTML."""
    # NOTE(review): chromedriver path is machine-specific — confirm before use.
    driver = webdriver.Chrome(executable_path=
        'C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe')
    driver.get(url)
    time.sleep(1)
    body = driver.find_element_by_tag_name("body")
    # First scrolling pass to load the initial batch of results.
    for _ in range(30):
        body.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    # Element id "smb" — presumably the "show more results" control; verify.
    driver.find_element_by_id("smb").click()
    # Second, longer scrolling pass for the additional results.
    for _ in range(50):
        body.send_keys(Keys.PAGE_DOWN)
        time.sleep(0.2)
    time.sleep(1)
    page_html = driver.page_source
    driver.close()
    return page_html
def download_image(link):
    """Resolve a Google Images result anchor to the real image URL and save it.

    *link* is a BeautifulSoup ``<a>`` tag from the results page.  The image
    is written to the current working directory under its URL basename.
    Failures are logged/skipped so one bad link does not kill the worker.
    """
    # Random User-Agent per request to reduce the chance of being blocked.
    headers = {"User-Agent": ua.random}
    href = link.get("href")
    if not href:
        # Anchor without a target — nothing to download.
        return
    try:
        r = requests.get("https://www.google.com" + href, headers=headers)
    except requests.RequestException:
        # BUG FIX: the original fell through with ``r`` unbound and raised
        # NameError on the next line; bail out of this link instead.
        print("Cannot get link.")
        return
    # The interstitial page's <title> ends with the actual image URL.
    title = str(fromstring(r.content).findtext(".//title"))
    link = title.split(" ")[-1]
    print("At : " + os.getcwd() + ", Downloading from " + link)
    try:
        # BUG FIX: ``('jpg' or 'png' or 'jpeg')`` evaluates to just 'jpg',
        # so png/jpeg files were silently skipped; use a membership test.
        if link.split(".")[-1] in ("jpg", "png", "jpeg"):
            urllib.request.urlretrieve(link, link.split("/")[-1])
    except (OSError, ValueError):
        # Best-effort: skip images that cannot be retrieved or written.
        pass
if __name__ == "__main__":
    # Command line: a single positional keyword to search for.
    parser = argparse.ArgumentParser()
    parser.add_argument("keyword", help="the keyword to search")
    args = parser.parse_args()

    # NOTE(review): huge recursion limit — presumably for BeautifulSoup's
    # deep parse trees; confirm it is still needed.
    sys.setrecursionlimit(100000000)

    # Google Images advanced-search URL; tbs= filters: larger than SVGA,
    # photo type, jpg format (taken from the query string itself).
    query = args.keyword

    url = "https://www.google.com/search?as_st=y&tbm=isch&as_q=" + query + \
        "&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg"
    source = search(url)

    # Parse the page source for result anchors.
    soup = BeautifulSoup(str(source), "html.parser")
    # Module-level UserAgent read by download_image in the workers.
    # NOTE(review): under spawn-based multiprocessing (Windows) this global
    # is not inherited by worker processes — verify download_image sees it.
    ua = UserAgent()

    # Download into a directory named after the keyword, creating it if needed.
    if not os.path.isdir(args.keyword):
        os.makedirs(args.keyword)

    os.chdir(str(os.getcwd()) + "/" + str(args.keyword))
    # Each result anchor carries class "rg_l" in the (old) results markup.
    links = soup.find_all("a", class_="rg_l")

    # Fan the downloads out over a process pool.
    with Pool() as pool:
        pool.map(download_image, links)
# 검색어
#search = 'chowchowbaby'
#url='https://www.google.co.kr/search?q=' + search + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'
# url
#driver = webdriver.Chrome(executable_path="C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe")
#driver.get(url)
#driver.implicitly_wait(2)
#num_of_pagedowns = 50
#elem = driver.find_element_by_xpath('/html/body')
#i = 0
#count = 1
#img = driver.find_elements_by_tag_name("img")
#while i < num_of_pagedowns:
#for item in img:
# if(count>0 and count<502):
# elem.send_keys(Keys.DOWN)
# time.sleep(1)
# full_name = "C:\\Program Files\\Python35\\강아지크롤러\\chowchowbaby\\" + str(count) + "_chowchowbaby.jpg"
# try:
# urllib.request.urlretrieve(item.get_attribute('src'), full_name)
# tfp=open(full_name,url)
# print(item.get_attribute('src')[:30] + " : ")
# except:
# urllib.request.urlretrieve(item.get_attribute('data-src'), full_name)
# tfp=open(full_name,url)
# print(item.get_attribute('data-src')[:30] + " : ")
# count = count+1
# i =i+1
#driver.Quit()
#print("Done.")
|
flexible
|
{
"blob_id": "142a2ba3ec2f6b35f4339ed9fffe7357c1a85fa0",
"index": 219,
"step-1": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('keyword', help='the keyword to search')\n args = parser.parse_args()\n sys.setrecursionlimit(100000000)\n query = args.keyword\n url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +\n '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'\n )\n source = search(url)\n soup = BeautifulSoup(str(source), 'html.parser')\n ua = UserAgent()\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n os.chdir(str(os.getcwd()) + '/' + str(args.keyword))\n links = soup.find_all('a', class_='rg_l')\n with Pool() as pool:\n pool.map(download_image, links)\n",
"step-4": "import requests\nimport time\nimport urllib\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\nfrom multiprocessing import Pool\nfrom lxml.html import fromstring\nimport os, sys\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('keyword', help='the keyword to search')\n args = parser.parse_args()\n sys.setrecursionlimit(100000000)\n query = args.keyword\n url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +\n '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'\n )\n source = search(url)\n soup = BeautifulSoup(str(source), 'html.parser')\n ua = UserAgent()\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n os.chdir(str(os.getcwd()) + '/' + str(args.keyword))\n links = soup.find_all('a', class_='rg_l')\n with Pool() as pool:\n 
pool.map(download_image, links)\n",
"step-5": "import requests\nimport time\nimport urllib\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\nfrom multiprocessing import Pool\nfrom lxml.html import fromstring\nimport os, sys\n\n#text = 'chowchowbaby'\n#url='https://www.google.co.kr/search?q=' + text + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'\n\ndef search(url):\n #Create a browser\n browser=webdriver.Chrome(executable_path='C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n\n #Open the link\n browser.get(url)\n time.sleep(1)\n\n element=browser.find_element_by_tag_name(\"body\")\n\n #Scroll down\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n\n browser.find_element_by_id(\"smb\").click()\n\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n\n time.sleep(1)\n\n #Get page source and close the browser\n source=browser.page_source\n browser.close()\n\n return source\n\n\ndef download_image(link):\n # Use a random user agent header\n headers = {\"User-Agent\": ua.random}\n\n # Get the image link\n try:\n r = requests.get(\"https://www.google.com\" + link.get(\"href\"), headers=headers)\n except:\n print(\"Cannot get link.\")\n title = str(fromstring(r.content).findtext(\".//title\"))\n link = title.split(\" \")[-1]\n\n # Download the image\n print(\"At : \" + os.getcwd() + \", Downloading from \" + link)\n try:\n if link.split(\".\")[-1] == ('jpg' or 'png' or 'jpeg'):\n\n urllib.request.urlretrieve(link, link.split(\"/\")[-1])\n except:\n pass\n\n\nif __name__ == \"__main__\":\n # parse command line options\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"keyword\", help=\"the keyword to search\")\n args = parser.parse_args()\n\n # set stack limit\n sys.setrecursionlimit(100000000)\n\n # get user input and search on google\n query = 
args.keyword\n\n\n #query = input(\"Enter the name you want to search\")\n\n\n\n url = \"https://www.google.com/search?as_st=y&tbm=isch&as_q=\" + query + \\\n \"&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg\"\n source = search(url)\n\n # Parse the page source and download pics\n soup = BeautifulSoup(str(source), \"html.parser\")\n ua = UserAgent()\n\n # check directory and create if necessary\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n\n os.chdir(str(os.getcwd()) + \"/\" + str(args.keyword))\n # get the links\n links = soup.find_all(\"a\", class_=\"rg_l\")\n\n # open some processes to download\n with Pool() as pool:\n pool.map(download_image, links)\n \n\n\n\n\n\n# 검색어\n#search = 'chowchowbaby'\n#url='https://www.google.co.kr/search?q=' + search + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'\n# url\n#driver = webdriver.Chrome(executable_path=\"C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe\")\n#driver.get(url)\n#driver.implicitly_wait(2)\n\n\n#num_of_pagedowns = 50\n#elem = driver.find_element_by_xpath('/html/body') \n\n#i = 0\n#count = 1\n#img = driver.find_elements_by_tag_name(\"img\")\n\n#while i < num_of_pagedowns:\n#for item in img:\n# if(count>0 and count<502):\n# elem.send_keys(Keys.DOWN)\n# time.sleep(1)\n# full_name = \"C:\\\\Program Files\\\\Python35\\\\강아지크롤러\\\\chowchowbaby\\\\\" + str(count) + \"_chowchowbaby.jpg\"\n# try:\n# urllib.request.urlretrieve(item.get_attribute('src'), full_name)\n# tfp=open(full_name,url)\n# print(item.get_attribute('src')[:30] + \" : \")\n# except:\n# urllib.request.urlretrieve(item.get_attribute('data-src'), full_name)\n# tfp=open(full_name,url)\n# print(item.get_attribute('data-src')[:30] + \" : \")\n# count = count+1\n# i =i+1\n\n \n#driver.Quit()\n#print(\"Done.\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Category(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Category(db.Model):
    """A category record, mapped to the ``category`` table."""
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    name = db.Column(db.String(20), nullable=False)  # category name
    addtime = db.Column(db.DateTime, nullable=False)  # creation timestamp

    def __repr__(self):
        # BUG FIX: the original emitted '<User %r>' — a copy/paste leftover
        # from a User model; a Category should identify itself as such.
        return '<Category %r>' % self.name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Category(db.Model):
    """A category record, mapped to the ``category`` table."""
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    name = db.Column(db.String(20), nullable=False)  # category name
    addtime = db.Column(db.DateTime, nullable=False)  # creation timestamp

    def __repr__(self):
        # BUG FIX: the original emitted '<User %r>' — a copy/paste leftover
        # from a User model; a Category should identify itself as such.
        return '<Category %r>' % self.name
if __name__ == '__main__':
    # Create every table mapped on this metadata when run as a script.
    db.create_all()
<|reserved_special_token_1|>
from flask_sqlalchemy import SQLAlchemy
from config.manager import app
from config.db import db
class Category(db.Model):
    """A category record, mapped to the ``category`` table."""
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    name = db.Column(db.String(20), nullable=False)  # category name
    addtime = db.Column(db.DateTime, nullable=False)  # creation timestamp

    def __repr__(self):
        # BUG FIX: the original emitted '<User %r>' — a copy/paste leftover
        # from a User model; a Category should identify itself as such.
        return '<Category %r>' % self.name
if __name__ == '__main__':
    # Create every table mapped on this metadata when run as a script.
    db.create_all()
<|reserved_special_token_1|>
# coding:utf-8
from flask_sqlalchemy import SQLAlchemy
from config.manager import app
from config.db import db
class Category(db.Model):
    """A category record, mapped to the ``category`` table."""
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    name = db.Column(db.String(20), nullable=False)  # category name (original comment said "account" — copied from a User model)
    addtime = db.Column(db.DateTime, nullable=False)  # creation timestamp

    def __repr__(self):
        # BUG FIX: the original emitted "<User %r>" — a copy/paste leftover
        # from a User model; a Category should identify itself as such.
        return "<Category %r>" % self.name
if __name__ == '__main__':
    # Create every table mapped on this metadata when run as a script.
    db.create_all()
|
flexible
|
{
"blob_id": "743aa4ccbb9a131b5ef3d04475789d3d1da1a2fa",
"index": 2407,
"step-1": "<mask token>\n\n\nclass Category(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-4": "from flask_sqlalchemy import SQLAlchemy\nfrom config.manager import app\nfrom config.db import db\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-5": "# coding:utf-8\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config.manager import app\nfrom config.db import db\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True) # 编号\n name = db.Column(db.String(20), nullable=False) # 账号\n addtime = db.Column(db.DateTime, nullable=False) # 注册时间\n\n def __repr__(self):\n return \"<User %r>\" % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import cv2
import numpy as np
# THRESHOLDING FUNCTION IMPLEMENTATION
def thresholding(img):
    """Return a binary mask of the pixels within the hand-tuned HSV bounds.

    The frame is converted BGR -> HSV and thresholded between bounds that
    were found with the trackbars in ColorPickerScript.py (intended to
    isolate the white lane markings).
    """
    hsv_frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_bound = np.array([80, 0, 0])
    upper_bound = np.array([255, 160, 255])
    return cv2.inRange(hsv_frame, lower_bound, upper_bound)
# WARPING FUNCTION IMPLEMENTATION
def warpImg(img, points, w, h, inv=False):
    """Perspective-warp *img*, mapping *points* onto a w-by-h rectangle.

    When ``inv`` is True the transform is reversed, mapping the rectangle
    corners back onto the original quadrilateral.
    """
    quad = np.float32(points)
    rect = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    # Swap source and destination to build the inverse transform.
    src, dst = (rect, quad) if inv else (quad, rect)
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, transform, (w, h))
# trackbar change will call nothing()
def nothing(a):
    """No-op callback; cv2.createTrackbar requires one but values are polled."""
# Creating the trackbars to find the optimal warping points.
# Care should be taken to choose points which are not very far from our current position
# ie. mostly lying in the bottom half region of the image since we should only confidently
# predict the lane warp present on the road at this point of time.
# create trackbars
def initializeTrackbars(initialTrackbarVals, wT=480, hT=240):
    """Create the "Trackbars" window with the four warp-point sliders.

    initialTrackbarVals supplies the starting positions in the order
    width-top, height-top, width-bottom, height-bottom.  Widths are capped
    at half the target frame width wT, heights at the frame height hT.
    """
    cv2.namedWindow("Trackbars")
    cv2.resizeWindow("Trackbars", 360, 240)
    sliders = (("Width Top", wT // 2), ("Height Top", hT),
               ("Width Bottom", wT // 2), ("Height Bottom", hT))
    for idx, (label, upper) in enumerate(sliders):
        cv2.createTrackbar(label, "Trackbars", initialTrackbarVals[idx],
                           upper, nothing)
# find the value of trackbars (real-time)
def valTrackbars(wT=480, hT=240):
    """Read the four tuning trackbars and return the warp source points.

    Points are returned as np.float32 in the order top-left, top-right,
    bottom-left, bottom-right, mirrored about the vertical center line of
    a wT-wide frame.
    """
    top_w = cv2.getTrackbarPos("Width Top", "Trackbars")
    top_h = cv2.getTrackbarPos("Height Top", "Trackbars")
    bot_w = cv2.getTrackbarPos("Width Bottom", "Trackbars")
    bot_h = cv2.getTrackbarPos("Height Bottom", "Trackbars")
    return np.float32([(top_w, top_h), (wT - top_w, top_h),
                       (bot_w, bot_h), (wT - bot_w, bot_h)])
# draw the warp points as red circles
def drawPoints(img, points):
    """Overlay the four warp points on *img* as filled red (BGR) circles."""
    for idx in range(4):
        center = (int(points[idx][0]), int(points[idx][1]))
        cv2.circle(img, center, 12, (0, 0, 255), cv2.FILLED)
    return img
# HISTOGRAM IMPLEMENTATION (TO FIND CURVE TURNING LEFT/RIGHT)
def getHistogram(img, minPer=0.1, display=False, region=1):
    """Locate the lane's horizontal center via a column-sum histogram.

    Sums pixel intensities down each column (over the whole image when
    region == 1, otherwise only over the bottom 1/region of the rows),
    keeps the columns whose sum is at least minPer * max, and returns the
    average index of those columns as the lane base point.

    Returns basePoint, or (basePoint, imgHist) when display is True.
    """
    if region == 1:
        histValues = np.sum(img, axis=0)
    else:
        # Only the bottom (1/region)th of the frame — the lane nearest us.
        histValues = np.sum(img[img.shape[0] // region:, :], axis=0)
    # The noise threshold is derived from the live data (a percentage of
    # the maximum column sum) rather than hard-coded.
    maxValue = np.max(histValues)
    minValue = minPer * maxValue
    indexArray = np.where(histValues >= minValue)
    basePoint = int(np.average(indexArray))
    if display:
        imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
        for x, intensity in enumerate(histValues):
            cv2.line(imgHist, (x, img.shape[0]),
                     (x, img.shape[0] - intensity // 255 // region),
                     (255, 0, 255), 1)
        # PERF FIX: the original redrew this circle once per column inside
        # the loop above; drawing it once afterwards yields the same image.
        cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255),
                   cv2.FILLED)
        return basePoint, imgHist
    return basePoint
# stack all the display windows
# (ONLY FOR DISPLAY PURPOSES, NO EFFECT ON PROGRAM)
def stackImages(scale,imgArray):
    """Tile a 1-D or 2-D list of images into a single composite for display.

    scale    -- resize factor applied to every tile.
    imgArray -- either a flat list of images (stacked into one row) or a
                list of rows of images (stacked into a grid).

    Every tile is resized to the first image's dimensions and grayscale
    tiles are promoted to BGR so np.hstack/np.vstack shapes line up.
    Display helper only; no effect on the detection pipeline.
    """
    rows = len(imgArray)
    cols = len(imgArray[0])
    # A list of lists means a 2-D grid of images.
    rowsAvailable = isinstance(imgArray[0], list)
    # Reference dimensions: every tile is normalized to the first image.
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range (0, rows):
            for y in range(0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                # Promote grayscale to 3-channel so stacking with color works.
                if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        # Stack each row horizontally, then all rows vertically.
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        # Flat list: a single horizontal strip.
        hor= np.hstack(imgArray)
        ver = hor
    return ver
|
normal
|
{
"blob_id": "44175d2559f9c7d6171b6e45d24719d50dc80fb7",
"index": 7221,
"step-1": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\n<mask token>\n\n\ndef nothing(a):\n pass\n\n\n<mask token>\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\n<mask token>\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = 
len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, scale)\n if len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-3": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240):\n cv2.namedWindow('Trackbars')\n cv2.resizeWindow('Trackbars', 360, 240)\n cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //\n 2, nothing)\n cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],\n hT, nothing)\n cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],\n wT // 2, nothing)\n cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],\n hT, nothing)\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= 
minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, scale)\n if len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-4": "import cv2\nimport numpy as np\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240):\n cv2.namedWindow('Trackbars')\n cv2.resizeWindow('Trackbars', 360, 240)\n cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //\n 2, nothing)\n cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],\n hT, nothing)\n cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],\n wT // 2, nothing)\n cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],\n hT, nothing)\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = 
np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, scale)\n if len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-5": "import cv2\nimport numpy as np\n\n# THRESHOLDING FUNCTION IMPLEMENTATION\ndef thresholding(img):\n # visualizing image in HSV parameters\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # the values for lowerWhite and upperWhite are found by tweaking the HSV min/max params in the \n # trackbar by running ColorPickerScript.py\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n # passing the values of lowerWhite and upperWhite to create the mask\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n# WARPING FUNCTION IMPLEMENTATION\ndef warpImg (img, points, w, h, inv=False):\n pts1 = np.float32(points)\n # defining the border coordinates of the warped image\n pts2 = np.float32([[0,0], [w,0], [0,h], [w,h]])\n # finding the transformation matrix\n if inv:\n #if inverted interchange pts2 and pts1\n matrix = cv2.getPerspectiveTransform(pts2,pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1,pts2)\n \n imgWarp = cv2.warpPerspective(img, matrix, (w,h))\n return imgWarp\n\n# trackbar change will call nothing()\ndef nothing(a): \n pass\n\n# Creating the trackbars to find the optimal warping points.\n# Care should be taken to choose points which are not very far from our current position\n# ie. mostly lying in the bottom half region of the image since we should only confidently\n# predict the lane warp present on the road at this point of time.\n\n# create trackbars \ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240): \n # wT and hT are the target window dimensions ie. 
window with video\n # create trackbar window\n cv2.namedWindow(\"Trackbars\")\n cv2.resizeWindow(\"Trackbars\", 360, 240)\n cv2.createTrackbar(\"Width Top\", \"Trackbars\", initialTrackbarVals[0], wT//2, nothing)\n cv2.createTrackbar(\"Height Top\", \"Trackbars\", initialTrackbarVals[1], hT, nothing)\n cv2.createTrackbar(\"Width Bottom\", \"Trackbars\", initialTrackbarVals[2], wT//2, nothing)\n cv2.createTrackbar(\"Height Bottom\", \"Trackbars\", initialTrackbarVals[3], hT, nothing)\n\n# find the value of trackbars (real-time)\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos(\"Width Top\", \"Trackbars\")\n heightTop = cv2.getTrackbarPos(\"Height Top\", \"Trackbars\")\n widthBottom = cv2.getTrackbarPos(\"Width Bottom\", \"Trackbars\")\n heightBottom = cv2.getTrackbarPos(\"Height Bottom\", \"Trackbars\")\n # return the bounding coordinates\n points = np.float32([(widthTop, heightTop), (wT-widthTop, heightTop), (widthBottom, heightBottom), (wT-widthBottom, heightBottom)])\n return points\n\n# draw the warp points as red circles\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0,0,255), cv2.FILLED)\n return img\n\n# HISTOGRAM IMPLEMENTATION (TO FIND CURVE TURNING LEFT/RIGHT)\ndef getHistogram(img, minPer=0.1, display= False, region=1): \n # simply sum all the pixels in the y direction\n if region == 1:\n # find histvalues for the complete region\n histValues = np.sum(img, axis=0)\n else:\n # find histvalues for ONLY the bottom (1/n)th region where n is region value\n histValues = np.sum(img[img.shape[0]//region:,:], axis=0)\n \n #print(histValues)\n \n # Some of the pixels in our image might just be noise. So we don’t want to use them in our \n # calculation. Therefore we will set a threshold value which will be the minimum value required\n # for any column to qualify as part of the path and not noise. 
We can set a hard-coded value but\n # it is better to get it based on the live data. So we will find the maximum sum value and \n # multiply our user defined percentage to it to create our threshold value.\n maxValue = np.max(histValues)\n minValue = minPer*maxValue\n \n # To get the value of the curvature we will find the indices of all the columns that have value \n # more than our threshold and then we will average our indices.\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n #print(basePoint)\n \n if display:\n imgHist = np.zeros((img.shape[0],img.shape[1],3),np.uint8)\n for x,intensity in enumerate(histValues):\n cv2.line(imgHist,(x,img.shape[0]),(x,img.shape[0]-intensity//255//region),(255,0,255),1)\n cv2.circle(imgHist,(basePoint,img.shape[0]),20,(0,255,255),cv2.FILLED)\n return basePoint,imgHist\n \n return basePoint\n \n# stack all the display windows\n# (ONLY FOR DISPLAY PURPOSES, NO EFFECT ON PROGRAM) \ndef stackImages(scale,imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range (0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)\n if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank]*rows\n hor_con = [imageBlank]*rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)\n else:\n imgArray[x] = 
cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)\n if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor= np.hstack(imgArray)\n ver = hor\n return ver",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
# coding=utf-8
# @FileName: test_json.py
# @Author: ZhengQiang
# Date: 2020/1/15 5:26 PM
import json

# Valid JSON document. The original literal '{"ddd": {{}}}' was malformed
# ({{}} is not legal JSON) and made json.loads raise JSONDecodeError.
a = '{"ddd": {"name": "Tom", "age": 5}}'


def boyhook(dic):
    """object_hook for json.loads: collapse any dict with a truthy 'name'
    into a (name, age) tuple; pass every other dict through unchanged."""
    print('test')
    # .get avoids the KeyError the original raised on dicts without 'name'
    # (object_hook is invoked for *every* object in the document, including
    # the outer one whose only key is 'ddd').
    if dic.get('name'):
        return dic['name'], dic['age']
    return dic


new_boy = json.loads(a, object_hook=boyhook)
print(new_boy)
|
normal
|
{
"blob_id": "2bc5711839ccbe525551b60211d8cd79ddb7775a",
"index": 7019,
"step-1": "<mask token>\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\n<mask token>\nprint(new_boy)\n",
"step-3": "<mask token>\na = '{\"ddd\": {{}}}'\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)\n",
"step-4": "import json\na = '{\"ddd\": {{}}}'\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)\n",
"step-5": "# coding=utf-8\n# @FileName: test_json.py\n# @Author: ZhengQiang\n# Date: 2020/1/15 5:26 下午\nimport json\na = \"{\\\"ddd\\\": {{}}}\"\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from datetime import datetime
from pymongo import MongoClient
from bson import ObjectId
from config import config
class Database(object):
    """Thin CRUD wrapper around a MongoDB database.

    Connection URL and database name are read from ``config['db']``.
    """

    def __init__(self):
        self.client = MongoClient(config['db']['url'])  # configure db url
        self.db = self.client[config['db']['name']]  # configure db name

    def insert(self, element, collection_name):
        """Insert *element*, stamping created/updated timestamps.

        Returns the new document's ObjectId as a string.
        """
        element["created"] = datetime.now()
        element["updated"] = datetime.now()
        inserted = self.db[collection_name].insert_one(element)  # insert data to db
        return str(inserted.inserted_id)

    def find(self, criteria, collection_name, projection=None, sort=None, limit=0, cursor=False):
        """Find documents matching *criteria*.

        Returns the raw pymongo cursor when *cursor* is True; otherwise a
        list of documents with ``_id`` converted to a string so the result
        is JSON-serializable.
        """
        if "_id" in criteria:
            criteria["_id"] = ObjectId(criteria["_id"])

        found = self.db[collection_name].find(filter=criteria, projection=projection, limit=limit, sort=sort)

        if cursor:
            return found

        found = list(found)
        for doc in found:  # ObjectId is not JSON-serializable
            if "_id" in doc:
                doc["_id"] = str(doc["_id"])
        return found

    def find_by_id(self, id, collection_name):
        """Return the document with the given id (``_id`` stringified),
        or None when no document matches.

        Bug fix: the original ``return not found`` evaluated to True for a
        missing record, making a missing document look truthy/found to
        callers.
        """
        found = self.db[collection_name].find_one({"_id": ObjectId(id)})

        if found is None:
            return None

        if "_id" in found:
            found["_id"] = str(found["_id"])
        return found

    def update(self, id, element, collection_name):
        """Apply *element* as a ``$set`` update on the given id.

        Returns a success message when exactly one record matched,
        otherwise None (implicitly).
        """
        criteria = {"_id": ObjectId(id)}

        element["updated"] = datetime.now()
        set_obj = {"$set": element}  # update value

        updated = self.db[collection_name].update_one(criteria, set_obj)
        if updated.matched_count == 1:
            return "Record Successfully Updated"

    def delete(self, id, collection_name):
        """Delete the document with the given id; True if one was deleted."""
        deleted = self.db[collection_name].delete_one({"_id": ObjectId(id)})
        return bool(deleted.deleted_count)
|
normal
|
{
"blob_id": "bcc76e4dbcc191e7912085cbb92c5b0ebd2b047b",
"index": 6550,
"step-1": "<mask token>\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n <mask token>\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n <mask token>\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n\n def update(self, id, element, collection_name):\n criteria = {'_id': ObjectId(id)}\n element['updated'] = datetime.now()\n set_obj = {'$set': element}\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return 'Record Successfully Updated'\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-3": "<mask token>\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n\n def insert(self, element, collection_name):\n element['created'] = datetime.now()\n element['updated'] = datetime.now()\n inserted = self.db[collection_name].insert_one(element)\n return str(inserted.inserted_id)\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n\n def update(self, id, element, collection_name):\n criteria = {'_id': ObjectId(id)}\n element['updated'] = datetime.now()\n set_obj = {'$set': element}\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return 'Record Successfully Updated'\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-4": "from datetime import datetime\nfrom pymongo import MongoClient\nfrom bson import ObjectId\nfrom config import config\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n\n def insert(self, element, collection_name):\n element['created'] = datetime.now()\n element['updated'] = datetime.now()\n inserted = self.db[collection_name].insert_one(element)\n return str(inserted.inserted_id)\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n\n def update(self, id, element, collection_name):\n criteria = {'_id': ObjectId(id)}\n element['updated'] = datetime.now()\n set_obj = {'$set': element}\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return 'Record Successfully Updated'\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-5": "from datetime import datetime\nfrom pymongo import MongoClient\nfrom bson import ObjectId\n\nfrom config import config\n\n\nclass Database(object):\n def __init__(self):\n self.client = MongoClient(config['db']['url']) # configure db url\n self.db = self.client[config['db']['name']] # configure db name\n\n def insert(self, element, collection_name):\n element[\"created\"] = datetime.now()\n element[\"updated\"] = datetime.now()\n inserted = self.db[collection_name].insert_one(element) # insert data to db\n return str(inserted.inserted_id)\n\n def find(self, criteria, collection_name, projection=None, sort=None, limit=0, cursor=False): # find all from db\n\n if \"_id\" in criteria:\n criteria[\"_id\"] = ObjectId(criteria[\"_id\"])\n\n found = self.db[collection_name].find(filter=criteria, projection=projection, limit=limit, sort=sort)\n\n if cursor:\n return found\n\n found = list(found)\n\n for i in range(len(found)): # to serialize object id need to convert string\n if \"_id\" in found[i]:\n found[i][\"_id\"] = str(found[i][\"_id\"])\n\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({\"_id\": ObjectId(id)})\n \n if found is None:\n return not found\n \n if \"_id\" in found:\n found[\"_id\"] = str(found[\"_id\"])\n\n return found\n\n def update(self, id, element, collection_name):\n criteria = {\"_id\": ObjectId(id)}\n\n element[\"updated\"] = datetime.now()\n set_obj = {\"$set\": element} # update value\n\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return \"Record Successfully Updated\"\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({\"_id\": ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from flask import Blueprint, request
from ecdsa import SigningKey, NIST384p
import base64, codecs
from cryptography.fernet import Fernet
# Blueprint grouping all ECDSA demo endpoints under the /ecdsa_app prefix.
ecdsa_app = Blueprint('ecdsa_app', __name__, url_prefix='/ecdsa_app')
# Symmetric cipher with a fresh random key — regenerated on every process
# start, so tokens do not survive restarts.
f = Fernet(Fernet.generate_key())
sk = SigningKey.generate(curve=NIST384p)  # per-process ECDSA private key
vk = sk.get_verifying_key()  # matching public key used by the verify endpoints
@ecdsa_app.get('/create_pkey')
def private_key():
    """Return the hex encoding of the module's ECDSA signing key."""
    key_hex = sk.to_string().hex()
    return {"status": "success", "result": key_hex}
@ecdsa_app.post('/op')
def check_op():
    """Validate that the posted payload's operator is one we support."""
    payload = request.get_json()
    valid_ops = ('+', '-', '*', '/', '**', '//', '%')
    if payload['data']['op'] in valid_ops:
        return {
            'status': 'success',
            'message': 'successfully verified',
            'result': str(payload['data']),
        }
    return {'status': 'failure', 'message': 'invalid operator'}
@ecdsa_app.post('/verify_signature')
def signature_verify():
    """Sign a freshly encrypted token of the posted data and self-verify it.

    Returns {'status': 'success', ...} when the signature checks out, and a
    failure payload otherwise.
    """
    input = request.get_json()
    # NOTE(review): the token is encrypted and signed here in one shot, so
    # this exercises the server's own key pair rather than a client-supplied
    # signature — confirm that is the intent.
    token = f.encrypt(str(input['data']).encode())
    signature_ = sk.sign(token)
    finaloutput = {}
    try:
        if vk.verify(signature_, token):
            finaloutput['status'] = 'success'
            finaloutput['message'] = 'successfully verified'
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers BadSignatureError.
        finaloutput['status'] = 'failure'
        finaloutput['message'] = 'signature is invalid'

    return finaloutput
@ecdsa_app.post('/verify')
def verify_fun():
    """Check both operator validity and signature for the posted payload."""
    payload = request.get_json()
    verified = check_operator_verify(payload)
    if verified:
        return {'status': 'success', 'message': 'successfully verified'}
    return {
        'status': 'failure',
        'message': 'invalid operator or signature is invalid',
    }
def check_operator_verify(input):
    """Return the signature-verification result for *input*, or None.

    None is returned when the operator is unsupported or when anything
    raises (missing keys, bad hex, invalid signature).
    """
    operators = ('+', '-', '*', '/', '**', '//', '%')
    try:
        if input['data']['op'] in operators:
            # NOTE(review): Fernet encryption is nondeterministic, so this
            # token differs from whatever was originally signed; the
            # verification below can only succeed if the exact same token
            # bytes are reproduced — confirm the intended protocol.
            token = f.encrypt(str(input['data']).encode())
            reverse_signature = bytes.fromhex(input["signature"])
            return vk.verify(reverse_signature, token)
    except Exception:
        # Was a bare `except: pass`, which also swallowed SystemExit and
        # KeyboardInterrupt. Callers treat None as failure.
        return None
|
normal
|
{
"blob_id": "4eb7abb24451f3f895d0731de7b29a85d90c1539",
"index": 8246,
"step-1": "<mask token>\n\n\n@ecdsa_app.get('/create_pkey')\ndef private_key():\n return {'status': 'success', 'result': sk.to_string().hex()}\n\n\n@ecdsa_app.post('/op')\ndef check_op():\n input = request.get_json()\n operators = ['+', '-', '*', '/', '**', '//', '%']\n finaloutput = {}\n if input['data']['op'] in operators:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n finaloutput['result'] = str(input['data'])\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator'\n return finaloutput\n\n\n<mask token>\n\n\n@ecdsa_app.post('/verify')\ndef verify_fun():\n data = request.get_json()\n output = check_operator_verify(data)\n finaloutput = {}\n if output:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator or signature is invalid'\n return finaloutput\n\n\ndef check_operator_verify(input):\n try:\n operators = ['+', '-', '*', '/', '**', '//', '%']\n if input['data']['op'] in operators:\n token = f.encrypt(str(input['data']).encode())\n reverse_signature = bytes.fromhex(input['signature'])\n return vk.verify(reverse_signature, token)\n except:\n pass\n",
"step-2": "<mask token>\n\n\n@ecdsa_app.get('/create_pkey')\ndef private_key():\n return {'status': 'success', 'result': sk.to_string().hex()}\n\n\n@ecdsa_app.post('/op')\ndef check_op():\n input = request.get_json()\n operators = ['+', '-', '*', '/', '**', '//', '%']\n finaloutput = {}\n if input['data']['op'] in operators:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n finaloutput['result'] = str(input['data'])\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator'\n return finaloutput\n\n\n@ecdsa_app.post('/verify_signature')\ndef signature_verify():\n input = request.get_json()\n token = f.encrypt(str(input['data']).encode())\n signature_ = sk.sign(token)\n finaloutput = {}\n try:\n if vk.verify(signature_, token):\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n except:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'signature is invalid'\n return finaloutput\n\n\n@ecdsa_app.post('/verify')\ndef verify_fun():\n data = request.get_json()\n output = check_operator_verify(data)\n finaloutput = {}\n if output:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator or signature is invalid'\n return finaloutput\n\n\ndef check_operator_verify(input):\n try:\n operators = ['+', '-', '*', '/', '**', '//', '%']\n if input['data']['op'] in operators:\n token = f.encrypt(str(input['data']).encode())\n reverse_signature = bytes.fromhex(input['signature'])\n return vk.verify(reverse_signature, token)\n except:\n pass\n",
"step-3": "<mask token>\necdsa_app = Blueprint('ecdsa_app', __name__, url_prefix='/ecdsa_app')\nf = Fernet(Fernet.generate_key())\nsk = SigningKey.generate(curve=NIST384p)\nvk = sk.get_verifying_key()\n\n\n@ecdsa_app.get('/create_pkey')\ndef private_key():\n return {'status': 'success', 'result': sk.to_string().hex()}\n\n\n@ecdsa_app.post('/op')\ndef check_op():\n input = request.get_json()\n operators = ['+', '-', '*', '/', '**', '//', '%']\n finaloutput = {}\n if input['data']['op'] in operators:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n finaloutput['result'] = str(input['data'])\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator'\n return finaloutput\n\n\n@ecdsa_app.post('/verify_signature')\ndef signature_verify():\n input = request.get_json()\n token = f.encrypt(str(input['data']).encode())\n signature_ = sk.sign(token)\n finaloutput = {}\n try:\n if vk.verify(signature_, token):\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n except:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'signature is invalid'\n return finaloutput\n\n\n@ecdsa_app.post('/verify')\ndef verify_fun():\n data = request.get_json()\n output = check_operator_verify(data)\n finaloutput = {}\n if output:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator or signature is invalid'\n return finaloutput\n\n\ndef check_operator_verify(input):\n try:\n operators = ['+', '-', '*', '/', '**', '//', '%']\n if input['data']['op'] in operators:\n token = f.encrypt(str(input['data']).encode())\n reverse_signature = bytes.fromhex(input['signature'])\n return vk.verify(reverse_signature, token)\n except:\n pass\n",
"step-4": "from flask import Blueprint, request\nfrom ecdsa import SigningKey, NIST384p\nimport base64, codecs\nfrom cryptography.fernet import Fernet\necdsa_app = Blueprint('ecdsa_app', __name__, url_prefix='/ecdsa_app')\nf = Fernet(Fernet.generate_key())\nsk = SigningKey.generate(curve=NIST384p)\nvk = sk.get_verifying_key()\n\n\n@ecdsa_app.get('/create_pkey')\ndef private_key():\n return {'status': 'success', 'result': sk.to_string().hex()}\n\n\n@ecdsa_app.post('/op')\ndef check_op():\n input = request.get_json()\n operators = ['+', '-', '*', '/', '**', '//', '%']\n finaloutput = {}\n if input['data']['op'] in operators:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n finaloutput['result'] = str(input['data'])\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator'\n return finaloutput\n\n\n@ecdsa_app.post('/verify_signature')\ndef signature_verify():\n input = request.get_json()\n token = f.encrypt(str(input['data']).encode())\n signature_ = sk.sign(token)\n finaloutput = {}\n try:\n if vk.verify(signature_, token):\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n except:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'signature is invalid'\n return finaloutput\n\n\n@ecdsa_app.post('/verify')\ndef verify_fun():\n data = request.get_json()\n output = check_operator_verify(data)\n finaloutput = {}\n if output:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator or signature is invalid'\n return finaloutput\n\n\ndef check_operator_verify(input):\n try:\n operators = ['+', '-', '*', '/', '**', '//', '%']\n if input['data']['op'] in operators:\n token = f.encrypt(str(input['data']).encode())\n reverse_signature = bytes.fromhex(input['signature'])\n return vk.verify(reverse_signature, token)\n except:\n pass\n",
"step-5": "from flask import Blueprint, request\nfrom ecdsa import SigningKey, NIST384p\nimport base64, codecs\nfrom cryptography.fernet import Fernet\n\necdsa_app = Blueprint('ecdsa_app', __name__, url_prefix='/ecdsa_app')\nf = Fernet(Fernet.generate_key())\n\nsk = SigningKey.generate(curve=NIST384p)\nvk = sk.get_verifying_key()\n\n\n\n\n@ecdsa_app.get('/create_pkey')\ndef private_key():\n #reverse = bytes.fromhex(sk.to_string().hex()) \n return {\"status\":\"success\", \"result\":sk.to_string().hex()}\n\n@ecdsa_app.post('/op')\ndef check_op():\n input = request.get_json()\n operators = ['+', '-', '*', '/', '**', '//', '%']\n finaloutput = {}\n if input['data']['op'] in operators:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n finaloutput['result'] = str(input['data'])\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator'\n return finaloutput\n\n@ecdsa_app.post('/verify_signature')\ndef signature_verify():\n\n input = request.get_json()\n token = f.encrypt(str(input['data']).encode())\n #reverse_signature = bytes.fromhex(input[\"signature\"])\n signature_ = sk.sign(token)\n finaloutput = {}\n try:\n if (vk.verify(signature_, token)):\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n except:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'signature is invalid'\n\n return finaloutput\n\n\n@ecdsa_app.post('/verify')\ndef verify_fun():\n data = request.get_json()\n output = check_operator_verify(data)\n finaloutput ={}\n if output:\n finaloutput['status'] = 'success'\n finaloutput['message'] = 'successfully verified'\n else:\n finaloutput['status'] = 'failure'\n finaloutput['message'] = 'invalid operator or signature is invalid'\n return finaloutput\n\ndef check_operator_verify(input):\n try:\n operators = ['+', '-', '*', '/', '**', '//', '%']\n if input['data']['op'] in operators:\n token = f.encrypt(str(input['data']).encode())\n 
reverse_signature = bytes.fromhex(input[\"signature\"])\n return (vk.verify(reverse_signature, token))\n except:\n pass\n\n ",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ax.set_aspect('equal')
<|reserved_special_token_1|>
ax = fig.gca()
ax.set_aspect('equal')
<|reserved_special_token_1|>
# Adjust figure when using plt.gcf
ax = fig.gca()
ax.set_aspect('equal')
|
flexible
|
{
"blob_id": "24246427e2fde47bbc9d068605301f54c6ecbae5",
"index": 1797,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.set_aspect('equal')\n",
"step-3": "ax = fig.gca()\nax.set_aspect('equal')\n",
"step-4": "# Adjust figure when using plt.gcf\nax = fig.gca()\nax.set_aspect('equal')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Write dictionary objects to be stored in a binary file.
import pickle

# Dictionary objects to be stored in the binary file.
emp1 = {"Empno": 1201, "Name": "Anushree", "Age": 25, "Salary": 47000}
emp2 = {"Empno": 1211, "Name": "Zoya", "Age": 30, "Salary": 48000}
emp3 = {"Empno": 1251, "Name": "Simarjeet", "Age": 27, "Salary": 49000}
emp4 = {"Empno": 1266, "Name": "Alex", "Age": 29, "Salary": 50000}

# The context manager guarantees the file is closed even if a dump raises;
# the original relied on an explicit close() that an error would have skipped.
with open('Emp.dat', "wb") as empObj:
    for emp in (emp1, emp2, emp3, emp4):
        pickle.dump(emp, empObj)

print("Successfully written four dictionaries")
|
normal
|
{
"blob_id": "23937ae531cc95069a1319f8c77a459ba7645363",
"index": 4331,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-3": "<mask token>\nemp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}\nemp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}\nemp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}\nemp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}\nempObj = open('Emp.dat', 'wb')\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-4": "import pickle\nemp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}\nemp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}\nemp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}\nemp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}\nempObj = open('Emp.dat', 'wb')\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-5": "# write dictionary objects to be stored in a binary file\n\n\nimport pickle\n#dictionary objects to be stored in a binary file\nemp1 = {\"Empno\" : 1201, \"Name\" : \"Anushree\", \"Age\" : 25, \"Salary\" : 47000}\nemp2 = {\"Empno\" : 1211, \"Name\" : \"Zoya\", \"Age\" : 30, \"Salary\" : 48000}\nemp3 = {\"Empno\" : 1251, \"Name\" : \"Simarjeet\", \"Age\" : 27, \"Salary\" : 49000}\nemp4 = {\"Empno\" : 1266, \"Name\" : \"Alex\", \"Age\" : 29, \"Salary\" : 50000}\n\nempObj = open('Emp.dat',\"wb\")\n\n#write onto the file\n\npickle.dump(emp1,empObj)\npickle.dump(emp2,empObj)\npickle.dump(emp3,empObj)\npickle.dump(emp4,empObj)\n\nprint(\"Successfully written four dictionaries\")\nempObj.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
''' The previous code does not correcly compute the stiffening coefficients
This program uses the clustering data to re-compute the stiffening coefficients '''
import glob
import sys
import time
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
def LoadClusterHistogram(inputfile):
f = open(inputfile)
data = []
while True:
fields = f.readline().strip().split(',')
if len(fields)>1:
nAtomsInCluster = float(fields[0])
nClusters = float(fields[1])
data.append((nAtomsInCluster, nClusters))
else:
break
return data
def NIntsBetweenTerminalGroupsMax(nGroups):
return nGroups*(nGroups-1)*0.5
def NIntsBetweenTerminalGroupsMin(nGroups):
return nGroups - 1
def NTerminalGroupsInCluster(nAtomsInCluster, moltype):
nAtomsPerGroup = {'EtOCSMethyl': 1.0, 'EtOCSVinyl': 2.0, 'EtOCSPhenyl': 6.0}
return int(nAtomsInCluster/nAtomsPerGroup[moltype])
def ComputeStiffening(data, moltype):
# the min and max number of interactions between pairs of terminal groups
nAtomIntsPerPairOfGroupsMin = {'EtOCSMethyl': 1, 'EtOCSVinyl': 1, 'EtOCSPhenyl': 4}
nAtomIntsPerPairOfGroupsMax = {'EtOCSMethyl': 1, 'EtOCSVinyl': 4, 'EtOCSPhenyl': 36}
nStericInteractionsMin = 0 # gamma_min
nStericInteractionsMax = 0 # gamma_max
for cluster in data:
nAtomsInCluster, nClusters = cluster
nTerminalGroups = NTerminalGroupsInCluster(nAtomsInCluster, moltype)
nGroupIntsMin = NIntsBetweenTerminalGroupsMin(nTerminalGroups)
nGroupIntsMax = NIntsBetweenTerminalGroupsMax(nTerminalGroups)
nStericInteractionsMin += nGroupIntsMin * nAtomIntsPerPairOfGroupsMin[moltype] * nClusters
nStericInteractionsMax += nGroupIntsMax * nAtomIntsPerPairOfGroupsMax[moltype] * nClusters
return (nStericInteractionsMin, nStericInteractionsMax)
def ComputeStiffeningOH(data):
nStericInteractionsMin = 0 # gamma_min
nStericInteractionsMax = 0 # gamma_max
for cluster in data:
nAtomsInCluster, nClusters = cluster
nStericInteractionsMin += (nAtomsInCluster-1)*nClusters
nStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters
return (nStericInteractionsMin, nStericInteractionsMax)
def ComputeStiffeningCoeffs(data):
nStericInteractionsMin = 0 # gamma_min
nStericInteractionsMax = 0 # gamma_max
for cluster in data:
nAtomsInCluster, nClusters = cluster
nStericInteractionsMin += (nAtomsInCluster-1)*nClusters
nStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters
return (nStericInteractionsMin, nStericInteractionsMax)
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------#
if len(sys.argv) < 2:
print 'Usage:'
print ' python %s <precursor type> [OH - False]' %sys.argv[0]
exit()
moltype = sys.argv[1]
if len(sys.argv) > 2:
OHGroups = True
else:
OHGroups = False
t0 = time.time()
# get all the relevant files and process each network
inputfiles = glob.glob('{}_*.txt'.format(moltype))
# write all the results to the same file
f = open('steric_interactions.txt', 'w')
f.write('Filename : gamma_min, gamma_max\n')
for inputfile in inputfiles:
print 'Working with %s...' %inputfile
data = LoadClusterHistogram(inputfile)
gamma_min, gamma_max = ComputeStiffeningCoeffs(data)
# if OHGroups:
# gamma_min, gamma_max = ComputeStiffeningOH(data)
# else:
# gamma_min, gamma_max = ComputeStiffening(data, moltype)
f.write('%s : %.4f, %.4f\n' %(inputfile, gamma_min, gamma_max))
f.close()
print 'Analyzed network in %.4f seconds.' %(time.time()-t0)
|
normal
|
{
"blob_id": "095d7abfc8297e0bf741a4ebb351a7776055623f",
"index": 326,
"step-1": "''' The previous code does not correcly compute the stiffening coefficients \nThis program uses the clustering data to re-compute the stiffening coefficients '''\n\nimport glob\nimport sys\nimport time\n\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n\ndef LoadClusterHistogram(inputfile):\n\tf = open(inputfile)\n\n\tdata = []\n\twhile True:\n\t\tfields = f.readline().strip().split(',')\n\t\tif len(fields)>1: \n\t\t\tnAtomsInCluster = float(fields[0])\n\t\t\tnClusters = float(fields[1])\n\t\t\tdata.append((nAtomsInCluster, nClusters))\n\t\telse:\n\t\t\tbreak\n\treturn data\n\n\ndef NIntsBetweenTerminalGroupsMax(nGroups):\n\treturn nGroups*(nGroups-1)*0.5\n\ndef NIntsBetweenTerminalGroupsMin(nGroups):\n\treturn nGroups - 1\n\ndef NTerminalGroupsInCluster(nAtomsInCluster, moltype):\n\tnAtomsPerGroup = {'EtOCSMethyl': 1.0, 'EtOCSVinyl': 2.0, 'EtOCSPhenyl': 6.0}\n\treturn int(nAtomsInCluster/nAtomsPerGroup[moltype])\n\ndef ComputeStiffening(data, moltype):\n\t# the min and max number of interactions between pairs of terminal groups \n\tnAtomIntsPerPairOfGroupsMin = {'EtOCSMethyl': 1, 'EtOCSVinyl': 1, 'EtOCSPhenyl': 4}\n\tnAtomIntsPerPairOfGroupsMax = {'EtOCSMethyl': 1, 'EtOCSVinyl': 4, 'EtOCSPhenyl': 36} \n\n\tnStericInteractionsMin = 0 # gamma_min\n\tnStericInteractionsMax = 0 # gamma_max\n\tfor cluster in data:\n\t\tnAtomsInCluster, nClusters = cluster\n\n\t\tnTerminalGroups = NTerminalGroupsInCluster(nAtomsInCluster, moltype)\n\t\t\n\t\tnGroupIntsMin = NIntsBetweenTerminalGroupsMin(nTerminalGroups)\n\t\tnGroupIntsMax = NIntsBetweenTerminalGroupsMax(nTerminalGroups)\n\n\t\tnStericInteractionsMin += nGroupIntsMin * nAtomIntsPerPairOfGroupsMin[moltype] * nClusters\n\t\tnStericInteractionsMax += nGroupIntsMax * nAtomIntsPerPairOfGroupsMax[moltype] * 
nClusters\n\n\treturn (nStericInteractionsMin, nStericInteractionsMax)\n\ndef ComputeStiffeningOH(data):\n\tnStericInteractionsMin = 0 # gamma_min\n\tnStericInteractionsMax = 0 # gamma_max\n\tfor cluster in data:\n\t\tnAtomsInCluster, nClusters = cluster\n\n\t\tnStericInteractionsMin += (nAtomsInCluster-1)*nClusters\n\t\tnStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters\n\n\treturn (nStericInteractionsMin, nStericInteractionsMax)\n\n\ndef ComputeStiffeningCoeffs(data):\n\tnStericInteractionsMin = 0 # gamma_min\n\tnStericInteractionsMax = 0 # gamma_max\n\tfor cluster in data:\n\t\tnAtomsInCluster, nClusters = cluster\n\n\t\tnStericInteractionsMin += (nAtomsInCluster-1)*nClusters\n\t\tnStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters\n\n\treturn (nStericInteractionsMin, nStericInteractionsMax)\n\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n\nif len(sys.argv) < 2:\n\tprint 'Usage:'\n\tprint ' python %s <precursor type> [OH - False]' %sys.argv[0]\n\texit()\n\nmoltype = sys.argv[1]\n\nif len(sys.argv) > 2: \n\tOHGroups = True\nelse:\n\tOHGroups = False\n\nt0 = time.time()\n\t\t\n# get all the relevant files and process each network\ninputfiles = glob.glob('{}_*.txt'.format(moltype))\n\n# write all the results to the same file\nf = open('steric_interactions.txt', 'w')\nf.write('Filename : gamma_min, gamma_max\\n')\n\nfor inputfile in inputfiles:\n\n\tprint 'Working with %s...' 
%inputfile\n\t\n\tdata = LoadClusterHistogram(inputfile)\n\tgamma_min, gamma_max = ComputeStiffeningCoeffs(data)\n\n\t# if OHGroups:\n\t# \tgamma_min, gamma_max = ComputeStiffeningOH(data)\n\t# else:\n\t# \tgamma_min, gamma_max = ComputeStiffening(data, moltype)\n\n\tf.write('%s : %.4f, %.4f\\n' %(inputfile, gamma_min, gamma_max))\n\nf.close()\n\nprint 'Analyzed network in %.4f seconds.' %(time.time()-t0) ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from typing import Union
from django.db.models import Q, Value
from django.db.models.functions import Lower, Replace, Trim
from .normalization import (
normalize_doi,
normalize_funkcja_autora,
normalize_grupa_pracownicza,
normalize_isbn,
normalize_kod_dyscypliny,
normalize_nazwa_dyscypliny,
normalize_nazwa_jednostki,
normalize_nazwa_wydawcy,
normalize_public_uri,
normalize_tytul_naukowy,
normalize_tytul_publikacji,
normalize_tytul_zrodla,
normalize_wymiar_etatu,
)
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.search import TrigramSimilarity
from bpp.models import (
Autor,
Autor_Jednostka,
Dyscyplina_Naukowa,
Funkcja_Autora,
Grupa_Pracownicza,
Jednostka,
Rekord,
Tytul,
Wydawca,
Wydawnictwo_Ciagle,
Wydawnictwo_Zwarte,
Wydzial,
Wymiar_Etatu,
Zrodlo,
)
from bpp.util import fail_if_seq_scan
def matchuj_wydzial(nazwa):
try:
return Wydzial.objects.get(nazwa__iexact=nazwa.strip())
except Wydzial.DoesNotExist:
pass
def matchuj_tytul(tytul: str, create_if_not_exist=False) -> Tytul:
"""
Dostaje tytuł: pełną nazwę albo skrót
"""
try:
return Tytul.objects.get(nazwa__iexact=tytul)
except (Tytul.DoesNotExist, Tytul.MultipleObjectsReturned):
return Tytul.objects.get(skrot=normalize_tytul_naukowy(tytul))
def matchuj_funkcja_autora(funkcja_autora: str) -> Funkcja_Autora:
funkcja_autora = normalize_funkcja_autora(funkcja_autora)
return Funkcja_Autora.objects.get(
Q(nazwa__iexact=funkcja_autora) | Q(skrot__iexact=funkcja_autora)
)
def matchuj_grupa_pracownicza(grupa_pracownicza: str) -> Grupa_Pracownicza:
grupa_pracownicza = normalize_grupa_pracownicza(grupa_pracownicza)
return Grupa_Pracownicza.objects.get(nazwa__iexact=grupa_pracownicza)
def matchuj_wymiar_etatu(wymiar_etatu: str) -> Wymiar_Etatu:
wymiar_etatu = normalize_wymiar_etatu(wymiar_etatu)
return Wymiar_Etatu.objects.get(nazwa__iexact=wymiar_etatu)
def matchuj_jednostke(nazwa, wydzial=None):
nazwa = normalize_nazwa_jednostki(nazwa)
try:
return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(skrot__iexact=nazwa))
except Jednostka.DoesNotExist:
if nazwa.endswith("."):
nazwa = nazwa[:-1].strip()
try:
return Jednostka.objects.get(
Q(nazwa__istartswith=nazwa) | Q(skrot__istartswith=nazwa)
)
except Jednostka.MultipleObjectsReturned as e:
if wydzial is None:
raise e
return Jednostka.objects.get(
Q(nazwa__istartswith=nazwa) | Q(skrot__istartswith=nazwa),
Q(wydzial__nazwa__iexact=wydzial),
)
except Jednostka.MultipleObjectsReturned as e:
if wydzial is None:
raise e
return Jednostka.objects.get(
Q(nazwa__iexact=nazwa) | Q(skrot__iexact=nazwa),
Q(wydzial__nazwa__iexact=wydzial),
)
def matchuj_autora(
imiona: str,
nazwisko: str,
jednostka: Union[Jednostka, None] = None,
bpp_id: Union[int, None] = None,
pbn_uid_id: Union[str, None] = None,
system_kadrowy_id: Union[int, None] = None,
pbn_id: Union[int, None] = None,
orcid: Union[str, None] = None,
tytul_str: Union[Tytul, None] = None,
):
if bpp_id is not None:
try:
return Autor.objects.get(pk=bpp_id)
except Autor.DoesNotExist:
pass
if orcid:
try:
return Autor.objects.get(orcid__iexact=orcid.strip())
except Autor.DoesNotExist:
pass
if pbn_uid_id is not None and pbn_uid_id.strip() != "":
# Może być > 1 autor z takim pbn_uid_id
_qset = Autor.objects.filter(pbn_uid_id=pbn_uid_id)
if _qset.exists():
return _qset.first()
if system_kadrowy_id is not None:
try:
int(system_kadrowy_id)
except (TypeError, ValueError):
system_kadrowy_id = None
if system_kadrowy_id is not None:
try:
return Autor.objects.get(system_kadrowy_id=system_kadrowy_id)
except Autor.DoesNotExist:
pass
if pbn_id is not None:
if isinstance(pbn_id, str):
pbn_id = pbn_id.strip()
try:
pbn_id = int(pbn_id)
except (TypeError, ValueError):
pbn_id = None
if pbn_id is not None:
try:
return Autor.objects.get(pbn_id=pbn_id)
except Autor.DoesNotExist:
pass
queries = [
Q(
Q(nazwisko__iexact=nazwisko.strip())
| Q(poprzednie_nazwiska__icontains=nazwisko.strip()),
imiona__iexact=imiona.strip(),
)
]
if tytul_str:
queries.append(queries[0] & Q(tytul__skrot=tytul_str))
for qry in queries:
try:
return Autor.objects.get(qry)
except (Autor.DoesNotExist, Autor.MultipleObjectsReturned):
pass
try:
return Autor.objects.get(qry & Q(aktualna_jednostka=jednostka))
except (Autor.MultipleObjectsReturned, Autor.DoesNotExist):
pass
# Jesteśmy tutaj. Najwyraźniej poszukiwanie po aktualnej jednostce, imieniu, nazwisku,
# tytule itp nie bardzo się powiodło. Spróbujmy innej strategii -- jednostka jest
# określona, poszukajmy w jej autorach. Wszak nie musi być ta jednostka jednostką
# aktualną...
if jednostka:
queries = [
Q(
Q(autor__nazwisko__iexact=nazwisko.strip())
| Q(autor__poprzednie_nazwiska__icontains=nazwisko.strip()),
autor__imiona__iexact=imiona.strip(),
)
]
if tytul_str:
queries.append(queries[0] & Q(autor__tytul__skrot=tytul_str))
for qry in queries:
try:
return jednostka.autor_jednostka_set.get(qry).autor
except (
Autor_Jednostka.MultipleObjectsReturned,
Autor_Jednostka.DoesNotExist,
):
pass
return None
def matchuj_zrodlo(
s: Union[str, None],
issn: Union[str, None] = None,
e_issn: Union[str, None] = None,
alt_nazwa=None,
) -> Union[None, Zrodlo]:
if s is None or str(s) == "":
return
if issn is not None:
try:
return Zrodlo.objects.get(issn=issn)
except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):
pass
if e_issn is not None:
try:
return Zrodlo.objects.get(e_issn=e_issn)
except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):
pass
for elem in s, alt_nazwa:
if elem is None:
continue
elem = normalize_tytul_zrodla(elem)
try:
return Zrodlo.objects.get(Q(nazwa__iexact=elem) | Q(skrot__iexact=elem))
except Zrodlo.MultipleObjectsReturned:
pass
except Zrodlo.DoesNotExist:
if elem.endswith("."):
try:
return Zrodlo.objects.get(
Q(nazwa__istartswith=elem[:-1])
| Q(skrot__istartswith=elem[:-1])
)
except Zrodlo.DoesNotExist:
pass
except Zrodlo.MultipleObjectsReturned:
pass
def matchuj_dyscypline(kod, nazwa):
nazwa = normalize_nazwa_dyscypliny(nazwa)
try:
return Dyscyplina_Naukowa.objects.get(nazwa=nazwa)
except Dyscyplina_Naukowa.DoesNotExist:
pass
except Dyscyplina_Naukowa.MultipleObjectsReturned:
pass
kod = normalize_kod_dyscypliny(kod)
try:
return Dyscyplina_Naukowa.objects.get(kod=kod)
except Dyscyplina_Naukowa.DoesNotExist:
pass
except Dyscyplina_Naukowa.MultipleObjectsReturned:
pass
def matchuj_wydawce(nazwa, pbn_uid_id=None, similarity=0.9):
nazwa = normalize_nazwa_wydawcy(nazwa)
try:
return Wydawca.objects.get(nazwa=nazwa, alias_dla_id=None)
except Wydawca.DoesNotExist:
pass
if pbn_uid_id is not None:
try:
return Wydawca.objects.get(pbn_uid_id=pbn_uid_id)
except Wydawca.DoesNotExist:
pass
loose = (
Wydawca.objects.annotate(similarity=TrigramSimilarity("nazwa", nazwa))
.filter(similarity__gte=similarity)
.order_by("-similarity")[:5]
)
if loose.count() > 0 and loose.count() < 2:
return loose.first()
TITLE_LIMIT_SINGLE_WORD = 15
TITLE_LIMIT_MANY_WORDS = 25
MATCH_SIMILARITY_THRESHOLD = 0.95
MATCH_SIMILARITY_THRESHOLD_LOW = 0.90
MATCH_SIMILARITY_THRESHOLD_VERY_LOW = 0.80
# Znormalizowany tytuł w bazie danych -- wyrzucony ciąg znaków [online], podwójne
# spacje pozamieniane na pojedyncze, trim całości
normalized_db_title = Trim(
Replace(
Replace(Lower("tytul_oryginalny"), Value(" [online]"), Value("")),
Value(" "),
Value(" "),
)
)
# Znormalizowany skrót nazwy źródła -- wyrzucone spacje i kropki, trim, zmniejszone
# znaki
normalized_db_zrodlo_skrot = Trim(
Replace(
Replace(
Replace(Lower("skrot"), Value(" "), Value("")),
Value("-"),
Value(""),
),
Value("."),
Value(""),
)
)
def normalize_zrodlo_skrot_for_db_lookup(s):
return s.lower().replace(" ", "").strip().replace("-", "").replace(".", "")
# Znormalizowany skrot zrodla do wyszukiwania -- wyrzucone wszystko procz kropek
normalized_db_zrodlo_nazwa = Trim(
Replace(Lower("nazwa"), Value(" "), Value("")),
)
def normalize_zrodlo_nazwa_for_db_lookup(s):
return s.lower().replace(" ", "").strip()
normalized_db_isbn = Trim(Replace(Lower("isbn"), Value("-"), Value("")))
def matchuj_publikacje(
klass: [Wydawnictwo_Zwarte, Wydawnictwo_Ciagle, Rekord],
title,
year,
doi=None,
public_uri=None,
isbn=None,
zrodlo=None,
DEBUG_MATCHOWANIE=False,
isbn_matchuj_tylko_nadrzedne=True,
doi_matchuj_tylko_nadrzedne=True,
):
if doi is not None:
doi = normalize_doi(doi)
if doi:
zapytanie = klass.objects.filter(doi__istartswith=doi, rok=year)
if doi_matchuj_tylko_nadrzedne:
if hasattr(klass, "wydawnictwo_nadrzedne_id"):
zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)
res = zapytanie.annotate(
podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
).order_by("-podobienstwo")[:2]
fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
if res.exists():
if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:
return res.first()
title = normalize_tytul_publikacji(title)
title_has_spaces = False
if title is not None:
title_has_spaces = title.find(" ") > 0
if title is not None and (
(not title_has_spaces and len(title) >= TITLE_LIMIT_SINGLE_WORD)
or (title_has_spaces and len(title) >= TITLE_LIMIT_MANY_WORDS)
):
if zrodlo is not None and hasattr(klass, "zrodlo"):
try:
return klass.objects.get(
tytul_oryginalny__istartswith=title, rok=year, zrodlo=zrodlo
)
except klass.DoesNotExist:
pass
except klass.MultipleObjectsReturned:
print(
f"PPP ZZZ MultipleObjectsReturned dla title={title} rok={year} zrodlo={zrodlo}"
)
if (
isbn is not None
and isbn != ""
and hasattr(klass, "isbn")
and hasattr(klass, "e_isbn")
):
ni = normalize_isbn(isbn)
zapytanie = klass.objects.exclude(isbn=None, e_isbn=None).exclude(
isbn="", e_isbn=""
)
if isbn_matchuj_tylko_nadrzedne:
zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)
if klass == Rekord:
zapytanie = zapytanie.filter(
pk__in=[
(ContentType.objects.get_for_model(Wydawnictwo_Zwarte).pk, x)
for x in Wydawnictwo_Zwarte.objects.wydawnictwa_nadrzedne_dla_innych()
]
)
elif klass == Wydawnictwo_Zwarte:
zapytanie = zapytanie.filter(
pk__in=Wydawnictwo_Zwarte.objects.wydawnictwa_nadrzedne_dla_innych()
)
else:
raise NotImplementedError(
"Matchowanie po ISBN dla czegoś innego niż wydawnictwo zwarte nie opracowane"
)
#
# Uwaga uwaga uwaga.
#
# Gdy matchujemy ISBN, to w BPP dochodzi do takiej nieciekawej sytuacji: wpisywany jest
# ISBN zarówno dla rozdziałów jak i dla wydawnictw nadrzędnych.
#
# Zatem, na ten moment, aby usprawnić matchowanie ISBN, jeżeli ustawiona jest flaga
# isbn_matchuj_tylko_nadrzedne, to system bedzie szukał tylko i wyłącznie wśród
# rekordów będących wydawnictwami nadrzędnymi (czyli nie mającymi rekordów podrzędnych)
#
res = (
zapytanie.filter(Q(isbn=ni) | Q(e_isbn=ni))
.annotate(
podobienstwo=TrigramSimilarity(
normalized_db_title,
title.lower(),
)
)
.order_by("-podobienstwo")[:2]
)
fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
if res.exists():
if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:
return res.first()
public_uri = normalize_public_uri(public_uri)
if public_uri:
res = (
klass.objects.filter(Q(www=public_uri) | Q(public_www=public_uri))
.annotate(
podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
)
.order_by("-podobienstwo")[:2]
)
fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
if res.exists():
if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:
return res.first()
if title is not None and (
(not title_has_spaces and len(title) >= TITLE_LIMIT_SINGLE_WORD)
or (title_has_spaces and len(title) >= TITLE_LIMIT_MANY_WORDS)
):
res = (
klass.objects.filter(tytul_oryginalny__istartswith=title, rok=year)
.annotate(
podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
)
.order_by("-podobienstwo")[:2]
)
fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
if res.exists():
if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:
return res.first()
# Ostatnia szansa, po podobieństwie, niski próg
res = (
klass.objects.filter(rok=year)
.annotate(
podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
)
.order_by("-podobienstwo")[:2]
)
fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
if res.exists():
if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_LOW:
return res.first()
|
normal
|
{
"blob_id": "47025a30d79341ff0819fe87638e35960a5fc87d",
"index": 6446,
"step-1": "<mask token>\n\n\ndef matchuj_wydzial(nazwa):\n try:\n return Wydzial.objects.get(nazwa__iexact=nazwa.strip())\n except Wydzial.DoesNotExist:\n pass\n\n\ndef matchuj_tytul(tytul: str, create_if_not_exist=False) ->Tytul:\n \"\"\"\n Dostaje tytuł: pełną nazwę albo skrót\n \"\"\"\n try:\n return Tytul.objects.get(nazwa__iexact=tytul)\n except (Tytul.DoesNotExist, Tytul.MultipleObjectsReturned):\n return Tytul.objects.get(skrot=normalize_tytul_naukowy(tytul))\n\n\ndef matchuj_funkcja_autora(funkcja_autora: str) ->Funkcja_Autora:\n funkcja_autora = normalize_funkcja_autora(funkcja_autora)\n return Funkcja_Autora.objects.get(Q(nazwa__iexact=funkcja_autora) | Q(\n skrot__iexact=funkcja_autora))\n\n\ndef matchuj_grupa_pracownicza(grupa_pracownicza: str) ->Grupa_Pracownicza:\n grupa_pracownicza = normalize_grupa_pracownicza(grupa_pracownicza)\n return Grupa_Pracownicza.objects.get(nazwa__iexact=grupa_pracownicza)\n\n\n<mask token>\n\n\ndef matchuj_jednostke(nazwa, wydzial=None):\n nazwa = normalize_nazwa_jednostki(nazwa)\n try:\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa))\n except Jednostka.DoesNotExist:\n if nazwa.endswith('.'):\n nazwa = nazwa[:-1].strip()\n try:\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n\n\n<mask token>\n\n\ndef matchuj_zrodlo(s: Union[str, None], issn: Union[str, None]=None, e_issn:\n Union[str, None]=None, alt_nazwa=None) ->Union[None, Zrodlo]:\n if s is None or str(s) == '':\n return\n if issn is not None:\n try:\n return 
Zrodlo.objects.get(issn=issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n if e_issn is not None:\n try:\n return Zrodlo.objects.get(e_issn=e_issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n for elem in (s, alt_nazwa):\n if elem is None:\n continue\n elem = normalize_tytul_zrodla(elem)\n try:\n return Zrodlo.objects.get(Q(nazwa__iexact=elem) | Q(\n skrot__iexact=elem))\n except Zrodlo.MultipleObjectsReturned:\n pass\n except Zrodlo.DoesNotExist:\n if elem.endswith('.'):\n try:\n return Zrodlo.objects.get(Q(nazwa__istartswith=elem[:-1\n ]) | Q(skrot__istartswith=elem[:-1]))\n except Zrodlo.DoesNotExist:\n pass\n except Zrodlo.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_dyscypline(kod, nazwa):\n nazwa = normalize_nazwa_dyscypliny(nazwa)\n try:\n return Dyscyplina_Naukowa.objects.get(nazwa=nazwa)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n kod = normalize_kod_dyscypliny(kod)\n try:\n return Dyscyplina_Naukowa.objects.get(kod=kod)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_wydawce(nazwa, pbn_uid_id=None, similarity=0.9):\n nazwa = normalize_nazwa_wydawcy(nazwa)\n try:\n return Wydawca.objects.get(nazwa=nazwa, alias_dla_id=None)\n except Wydawca.DoesNotExist:\n pass\n if pbn_uid_id is not None:\n try:\n return Wydawca.objects.get(pbn_uid_id=pbn_uid_id)\n except Wydawca.DoesNotExist:\n pass\n loose = Wydawca.objects.annotate(similarity=TrigramSimilarity('nazwa',\n nazwa)).filter(similarity__gte=similarity).order_by('-similarity')[:5]\n if loose.count() > 0 and loose.count() < 2:\n return loose.first()\n\n\n<mask token>\n\n\ndef normalize_zrodlo_skrot_for_db_lookup(s):\n return s.lower().replace(' ', '').strip().replace('-', '').replace('.', '')\n\n\n<mask token>\n\n\ndef normalize_zrodlo_nazwa_for_db_lookup(s):\n return s.lower().replace(' ', 
'').strip()\n\n\n<mask token>\n\n\ndef matchuj_publikacje(klass: [Wydawnictwo_Zwarte, Wydawnictwo_Ciagle,\n Rekord], title, year, doi=None, public_uri=None, isbn=None, zrodlo=None,\n DEBUG_MATCHOWANIE=False, isbn_matchuj_tylko_nadrzedne=True,\n doi_matchuj_tylko_nadrzedne=True):\n if doi is not None:\n doi = normalize_doi(doi)\n if doi:\n zapytanie = klass.objects.filter(doi__istartswith=doi, rok=year)\n if doi_matchuj_tylko_nadrzedne:\n if hasattr(klass, 'wydawnictwo_nadrzedne_id'):\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n res = zapytanie.annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[\n :2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first(\n ).podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n title = normalize_tytul_publikacji(title)\n title_has_spaces = False\n if title is not None:\n title_has_spaces = title.find(' ') > 0\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n if zrodlo is not None and hasattr(klass, 'zrodlo'):\n try:\n return klass.objects.get(tytul_oryginalny__istartswith=\n title, rok=year, zrodlo=zrodlo)\n except klass.DoesNotExist:\n pass\n except klass.MultipleObjectsReturned:\n print(\n f'PPP ZZZ MultipleObjectsReturned dla title={title} rok={year} zrodlo={zrodlo}'\n )\n if isbn is not None and isbn != '' and hasattr(klass, 'isbn') and hasattr(\n klass, 'e_isbn'):\n ni = normalize_isbn(isbn)\n zapytanie = klass.objects.exclude(isbn=None, e_isbn=None).exclude(isbn\n ='', e_isbn='')\n if isbn_matchuj_tylko_nadrzedne:\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n if klass == Rekord:\n zapytanie = zapytanie.filter(pk__in=[(ContentType.objects.\n get_for_model(Wydawnictwo_Zwarte).pk, x) for x in\n Wydawnictwo_Zwarte.objects.\n wydawnictwa_nadrzedne_dla_innych()])\n elif klass == 
Wydawnictwo_Zwarte:\n zapytanie = zapytanie.filter(pk__in=Wydawnictwo_Zwarte.\n objects.wydawnictwa_nadrzedne_dla_innych())\n else:\n raise NotImplementedError(\n 'Matchowanie po ISBN dla czegoś innego niż wydawnictwo zwarte nie opracowane'\n )\n res = zapytanie.filter(Q(isbn=ni) | Q(e_isbn=ni)).annotate(podobienstwo\n =TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n public_uri = normalize_public_uri(public_uri)\n if public_uri:\n res = klass.objects.filter(Q(www=public_uri) | Q(public_www=public_uri)\n ).annotate(podobienstwo=TrigramSimilarity(normalized_db_title,\n title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n res = klass.objects.filter(tytul_oryginalny__istartswith=title, rok\n =year).annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n res = klass.objects.filter(rok=year).annotate(podobienstwo=\n TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_LOW:\n return res.first()\n",
"step-2": "<mask token>\n\n\ndef matchuj_wydzial(nazwa):\n try:\n return Wydzial.objects.get(nazwa__iexact=nazwa.strip())\n except Wydzial.DoesNotExist:\n pass\n\n\ndef matchuj_tytul(tytul: str, create_if_not_exist=False) ->Tytul:\n \"\"\"\n Dostaje tytuł: pełną nazwę albo skrót\n \"\"\"\n try:\n return Tytul.objects.get(nazwa__iexact=tytul)\n except (Tytul.DoesNotExist, Tytul.MultipleObjectsReturned):\n return Tytul.objects.get(skrot=normalize_tytul_naukowy(tytul))\n\n\ndef matchuj_funkcja_autora(funkcja_autora: str) ->Funkcja_Autora:\n funkcja_autora = normalize_funkcja_autora(funkcja_autora)\n return Funkcja_Autora.objects.get(Q(nazwa__iexact=funkcja_autora) | Q(\n skrot__iexact=funkcja_autora))\n\n\ndef matchuj_grupa_pracownicza(grupa_pracownicza: str) ->Grupa_Pracownicza:\n grupa_pracownicza = normalize_grupa_pracownicza(grupa_pracownicza)\n return Grupa_Pracownicza.objects.get(nazwa__iexact=grupa_pracownicza)\n\n\ndef matchuj_wymiar_etatu(wymiar_etatu: str) ->Wymiar_Etatu:\n wymiar_etatu = normalize_wymiar_etatu(wymiar_etatu)\n return Wymiar_Etatu.objects.get(nazwa__iexact=wymiar_etatu)\n\n\ndef matchuj_jednostke(nazwa, wydzial=None):\n nazwa = normalize_nazwa_jednostki(nazwa)\n try:\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa))\n except Jednostka.DoesNotExist:\n if nazwa.endswith('.'):\n nazwa = nazwa[:-1].strip()\n try:\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n\n\n<mask token>\n\n\ndef matchuj_zrodlo(s: Union[str, None], issn: Union[str, None]=None, e_issn:\n 
Union[str, None]=None, alt_nazwa=None) ->Union[None, Zrodlo]:\n if s is None or str(s) == '':\n return\n if issn is not None:\n try:\n return Zrodlo.objects.get(issn=issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n if e_issn is not None:\n try:\n return Zrodlo.objects.get(e_issn=e_issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n for elem in (s, alt_nazwa):\n if elem is None:\n continue\n elem = normalize_tytul_zrodla(elem)\n try:\n return Zrodlo.objects.get(Q(nazwa__iexact=elem) | Q(\n skrot__iexact=elem))\n except Zrodlo.MultipleObjectsReturned:\n pass\n except Zrodlo.DoesNotExist:\n if elem.endswith('.'):\n try:\n return Zrodlo.objects.get(Q(nazwa__istartswith=elem[:-1\n ]) | Q(skrot__istartswith=elem[:-1]))\n except Zrodlo.DoesNotExist:\n pass\n except Zrodlo.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_dyscypline(kod, nazwa):\n nazwa = normalize_nazwa_dyscypliny(nazwa)\n try:\n return Dyscyplina_Naukowa.objects.get(nazwa=nazwa)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n kod = normalize_kod_dyscypliny(kod)\n try:\n return Dyscyplina_Naukowa.objects.get(kod=kod)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_wydawce(nazwa, pbn_uid_id=None, similarity=0.9):\n nazwa = normalize_nazwa_wydawcy(nazwa)\n try:\n return Wydawca.objects.get(nazwa=nazwa, alias_dla_id=None)\n except Wydawca.DoesNotExist:\n pass\n if pbn_uid_id is not None:\n try:\n return Wydawca.objects.get(pbn_uid_id=pbn_uid_id)\n except Wydawca.DoesNotExist:\n pass\n loose = Wydawca.objects.annotate(similarity=TrigramSimilarity('nazwa',\n nazwa)).filter(similarity__gte=similarity).order_by('-similarity')[:5]\n if loose.count() > 0 and loose.count() < 2:\n return loose.first()\n\n\n<mask token>\n\n\ndef normalize_zrodlo_skrot_for_db_lookup(s):\n return s.lower().replace(' ', 
'').strip().replace('-', '').replace('.', '')\n\n\n<mask token>\n\n\ndef normalize_zrodlo_nazwa_for_db_lookup(s):\n return s.lower().replace(' ', '').strip()\n\n\n<mask token>\n\n\ndef matchuj_publikacje(klass: [Wydawnictwo_Zwarte, Wydawnictwo_Ciagle,\n Rekord], title, year, doi=None, public_uri=None, isbn=None, zrodlo=None,\n DEBUG_MATCHOWANIE=False, isbn_matchuj_tylko_nadrzedne=True,\n doi_matchuj_tylko_nadrzedne=True):\n if doi is not None:\n doi = normalize_doi(doi)\n if doi:\n zapytanie = klass.objects.filter(doi__istartswith=doi, rok=year)\n if doi_matchuj_tylko_nadrzedne:\n if hasattr(klass, 'wydawnictwo_nadrzedne_id'):\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n res = zapytanie.annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[\n :2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first(\n ).podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n title = normalize_tytul_publikacji(title)\n title_has_spaces = False\n if title is not None:\n title_has_spaces = title.find(' ') > 0\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n if zrodlo is not None and hasattr(klass, 'zrodlo'):\n try:\n return klass.objects.get(tytul_oryginalny__istartswith=\n title, rok=year, zrodlo=zrodlo)\n except klass.DoesNotExist:\n pass\n except klass.MultipleObjectsReturned:\n print(\n f'PPP ZZZ MultipleObjectsReturned dla title={title} rok={year} zrodlo={zrodlo}'\n )\n if isbn is not None and isbn != '' and hasattr(klass, 'isbn') and hasattr(\n klass, 'e_isbn'):\n ni = normalize_isbn(isbn)\n zapytanie = klass.objects.exclude(isbn=None, e_isbn=None).exclude(isbn\n ='', e_isbn='')\n if isbn_matchuj_tylko_nadrzedne:\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n if klass == Rekord:\n zapytanie = zapytanie.filter(pk__in=[(ContentType.objects.\n 
get_for_model(Wydawnictwo_Zwarte).pk, x) for x in\n Wydawnictwo_Zwarte.objects.\n wydawnictwa_nadrzedne_dla_innych()])\n elif klass == Wydawnictwo_Zwarte:\n zapytanie = zapytanie.filter(pk__in=Wydawnictwo_Zwarte.\n objects.wydawnictwa_nadrzedne_dla_innych())\n else:\n raise NotImplementedError(\n 'Matchowanie po ISBN dla czegoś innego niż wydawnictwo zwarte nie opracowane'\n )\n res = zapytanie.filter(Q(isbn=ni) | Q(e_isbn=ni)).annotate(podobienstwo\n =TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n public_uri = normalize_public_uri(public_uri)\n if public_uri:\n res = klass.objects.filter(Q(www=public_uri) | Q(public_www=public_uri)\n ).annotate(podobienstwo=TrigramSimilarity(normalized_db_title,\n title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n res = klass.objects.filter(tytul_oryginalny__istartswith=title, rok\n =year).annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n res = klass.objects.filter(rok=year).annotate(podobienstwo=\n TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_LOW:\n return res.first()\n",
"step-3": "<mask token>\n\n\ndef matchuj_wydzial(nazwa):\n try:\n return Wydzial.objects.get(nazwa__iexact=nazwa.strip())\n except Wydzial.DoesNotExist:\n pass\n\n\ndef matchuj_tytul(tytul: str, create_if_not_exist=False) ->Tytul:\n \"\"\"\n Dostaje tytuł: pełną nazwę albo skrót\n \"\"\"\n try:\n return Tytul.objects.get(nazwa__iexact=tytul)\n except (Tytul.DoesNotExist, Tytul.MultipleObjectsReturned):\n return Tytul.objects.get(skrot=normalize_tytul_naukowy(tytul))\n\n\ndef matchuj_funkcja_autora(funkcja_autora: str) ->Funkcja_Autora:\n funkcja_autora = normalize_funkcja_autora(funkcja_autora)\n return Funkcja_Autora.objects.get(Q(nazwa__iexact=funkcja_autora) | Q(\n skrot__iexact=funkcja_autora))\n\n\ndef matchuj_grupa_pracownicza(grupa_pracownicza: str) ->Grupa_Pracownicza:\n grupa_pracownicza = normalize_grupa_pracownicza(grupa_pracownicza)\n return Grupa_Pracownicza.objects.get(nazwa__iexact=grupa_pracownicza)\n\n\ndef matchuj_wymiar_etatu(wymiar_etatu: str) ->Wymiar_Etatu:\n wymiar_etatu = normalize_wymiar_etatu(wymiar_etatu)\n return Wymiar_Etatu.objects.get(nazwa__iexact=wymiar_etatu)\n\n\ndef matchuj_jednostke(nazwa, wydzial=None):\n nazwa = normalize_nazwa_jednostki(nazwa)\n try:\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa))\n except Jednostka.DoesNotExist:\n if nazwa.endswith('.'):\n nazwa = nazwa[:-1].strip()\n try:\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n\n\ndef matchuj_autora(imiona: str, nazwisko: str, jednostka: Union[Jednostka,\n None]=None, bpp_id: 
Union[int, None]=None, pbn_uid_id: Union[str, None]\n =None, system_kadrowy_id: Union[int, None]=None, pbn_id: Union[int,\n None]=None, orcid: Union[str, None]=None, tytul_str: Union[Tytul, None]\n =None):\n if bpp_id is not None:\n try:\n return Autor.objects.get(pk=bpp_id)\n except Autor.DoesNotExist:\n pass\n if orcid:\n try:\n return Autor.objects.get(orcid__iexact=orcid.strip())\n except Autor.DoesNotExist:\n pass\n if pbn_uid_id is not None and pbn_uid_id.strip() != '':\n _qset = Autor.objects.filter(pbn_uid_id=pbn_uid_id)\n if _qset.exists():\n return _qset.first()\n if system_kadrowy_id is not None:\n try:\n int(system_kadrowy_id)\n except (TypeError, ValueError):\n system_kadrowy_id = None\n if system_kadrowy_id is not None:\n try:\n return Autor.objects.get(system_kadrowy_id=system_kadrowy_id)\n except Autor.DoesNotExist:\n pass\n if pbn_id is not None:\n if isinstance(pbn_id, str):\n pbn_id = pbn_id.strip()\n try:\n pbn_id = int(pbn_id)\n except (TypeError, ValueError):\n pbn_id = None\n if pbn_id is not None:\n try:\n return Autor.objects.get(pbn_id=pbn_id)\n except Autor.DoesNotExist:\n pass\n queries = [Q(Q(nazwisko__iexact=nazwisko.strip()) | Q(\n poprzednie_nazwiska__icontains=nazwisko.strip()), imiona__iexact=\n imiona.strip())]\n if tytul_str:\n queries.append(queries[0] & Q(tytul__skrot=tytul_str))\n for qry in queries:\n try:\n return Autor.objects.get(qry)\n except (Autor.DoesNotExist, Autor.MultipleObjectsReturned):\n pass\n try:\n return Autor.objects.get(qry & Q(aktualna_jednostka=jednostka))\n except (Autor.MultipleObjectsReturned, Autor.DoesNotExist):\n pass\n if jednostka:\n queries = [Q(Q(autor__nazwisko__iexact=nazwisko.strip()) | Q(\n autor__poprzednie_nazwiska__icontains=nazwisko.strip()),\n autor__imiona__iexact=imiona.strip())]\n if tytul_str:\n queries.append(queries[0] & Q(autor__tytul__skrot=tytul_str))\n for qry in queries:\n try:\n return jednostka.autor_jednostka_set.get(qry).autor\n except 
(Autor_Jednostka.MultipleObjectsReturned,\n Autor_Jednostka.DoesNotExist):\n pass\n return None\n\n\ndef matchuj_zrodlo(s: Union[str, None], issn: Union[str, None]=None, e_issn:\n Union[str, None]=None, alt_nazwa=None) ->Union[None, Zrodlo]:\n if s is None or str(s) == '':\n return\n if issn is not None:\n try:\n return Zrodlo.objects.get(issn=issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n if e_issn is not None:\n try:\n return Zrodlo.objects.get(e_issn=e_issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n for elem in (s, alt_nazwa):\n if elem is None:\n continue\n elem = normalize_tytul_zrodla(elem)\n try:\n return Zrodlo.objects.get(Q(nazwa__iexact=elem) | Q(\n skrot__iexact=elem))\n except Zrodlo.MultipleObjectsReturned:\n pass\n except Zrodlo.DoesNotExist:\n if elem.endswith('.'):\n try:\n return Zrodlo.objects.get(Q(nazwa__istartswith=elem[:-1\n ]) | Q(skrot__istartswith=elem[:-1]))\n except Zrodlo.DoesNotExist:\n pass\n except Zrodlo.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_dyscypline(kod, nazwa):\n nazwa = normalize_nazwa_dyscypliny(nazwa)\n try:\n return Dyscyplina_Naukowa.objects.get(nazwa=nazwa)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n kod = normalize_kod_dyscypliny(kod)\n try:\n return Dyscyplina_Naukowa.objects.get(kod=kod)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_wydawce(nazwa, pbn_uid_id=None, similarity=0.9):\n nazwa = normalize_nazwa_wydawcy(nazwa)\n try:\n return Wydawca.objects.get(nazwa=nazwa, alias_dla_id=None)\n except Wydawca.DoesNotExist:\n pass\n if pbn_uid_id is not None:\n try:\n return Wydawca.objects.get(pbn_uid_id=pbn_uid_id)\n except Wydawca.DoesNotExist:\n pass\n loose = Wydawca.objects.annotate(similarity=TrigramSimilarity('nazwa',\n nazwa)).filter(similarity__gte=similarity).order_by('-similarity')[:5]\n if 
loose.count() > 0 and loose.count() < 2:\n return loose.first()\n\n\n<mask token>\n\n\ndef normalize_zrodlo_skrot_for_db_lookup(s):\n return s.lower().replace(' ', '').strip().replace('-', '').replace('.', '')\n\n\n<mask token>\n\n\ndef normalize_zrodlo_nazwa_for_db_lookup(s):\n return s.lower().replace(' ', '').strip()\n\n\n<mask token>\n\n\ndef matchuj_publikacje(klass: [Wydawnictwo_Zwarte, Wydawnictwo_Ciagle,\n Rekord], title, year, doi=None, public_uri=None, isbn=None, zrodlo=None,\n DEBUG_MATCHOWANIE=False, isbn_matchuj_tylko_nadrzedne=True,\n doi_matchuj_tylko_nadrzedne=True):\n if doi is not None:\n doi = normalize_doi(doi)\n if doi:\n zapytanie = klass.objects.filter(doi__istartswith=doi, rok=year)\n if doi_matchuj_tylko_nadrzedne:\n if hasattr(klass, 'wydawnictwo_nadrzedne_id'):\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n res = zapytanie.annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[\n :2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first(\n ).podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n title = normalize_tytul_publikacji(title)\n title_has_spaces = False\n if title is not None:\n title_has_spaces = title.find(' ') > 0\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n if zrodlo is not None and hasattr(klass, 'zrodlo'):\n try:\n return klass.objects.get(tytul_oryginalny__istartswith=\n title, rok=year, zrodlo=zrodlo)\n except klass.DoesNotExist:\n pass\n except klass.MultipleObjectsReturned:\n print(\n f'PPP ZZZ MultipleObjectsReturned dla title={title} rok={year} zrodlo={zrodlo}'\n )\n if isbn is not None and isbn != '' and hasattr(klass, 'isbn') and hasattr(\n klass, 'e_isbn'):\n ni = normalize_isbn(isbn)\n zapytanie = klass.objects.exclude(isbn=None, e_isbn=None).exclude(isbn\n ='', e_isbn='')\n if 
isbn_matchuj_tylko_nadrzedne:\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n if klass == Rekord:\n zapytanie = zapytanie.filter(pk__in=[(ContentType.objects.\n get_for_model(Wydawnictwo_Zwarte).pk, x) for x in\n Wydawnictwo_Zwarte.objects.\n wydawnictwa_nadrzedne_dla_innych()])\n elif klass == Wydawnictwo_Zwarte:\n zapytanie = zapytanie.filter(pk__in=Wydawnictwo_Zwarte.\n objects.wydawnictwa_nadrzedne_dla_innych())\n else:\n raise NotImplementedError(\n 'Matchowanie po ISBN dla czegoś innego niż wydawnictwo zwarte nie opracowane'\n )\n res = zapytanie.filter(Q(isbn=ni) | Q(e_isbn=ni)).annotate(podobienstwo\n =TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n public_uri = normalize_public_uri(public_uri)\n if public_uri:\n res = klass.objects.filter(Q(www=public_uri) | Q(public_www=public_uri)\n ).annotate(podobienstwo=TrigramSimilarity(normalized_db_title,\n title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n res = klass.objects.filter(tytul_oryginalny__istartswith=title, rok\n =year).annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n res = klass.objects.filter(rok=year).annotate(podobienstwo=\n TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if 
res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_LOW:\n return res.first()\n",
"step-4": "from typing import Union\nfrom django.db.models import Q, Value\nfrom django.db.models.functions import Lower, Replace, Trim\nfrom .normalization import normalize_doi, normalize_funkcja_autora, normalize_grupa_pracownicza, normalize_isbn, normalize_kod_dyscypliny, normalize_nazwa_dyscypliny, normalize_nazwa_jednostki, normalize_nazwa_wydawcy, normalize_public_uri, normalize_tytul_naukowy, normalize_tytul_publikacji, normalize_tytul_zrodla, normalize_wymiar_etatu\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom bpp.models import Autor, Autor_Jednostka, Dyscyplina_Naukowa, Funkcja_Autora, Grupa_Pracownicza, Jednostka, Rekord, Tytul, Wydawca, Wydawnictwo_Ciagle, Wydawnictwo_Zwarte, Wydzial, Wymiar_Etatu, Zrodlo\nfrom bpp.util import fail_if_seq_scan\n\n\ndef matchuj_wydzial(nazwa):\n try:\n return Wydzial.objects.get(nazwa__iexact=nazwa.strip())\n except Wydzial.DoesNotExist:\n pass\n\n\ndef matchuj_tytul(tytul: str, create_if_not_exist=False) ->Tytul:\n \"\"\"\n Dostaje tytuł: pełną nazwę albo skrót\n \"\"\"\n try:\n return Tytul.objects.get(nazwa__iexact=tytul)\n except (Tytul.DoesNotExist, Tytul.MultipleObjectsReturned):\n return Tytul.objects.get(skrot=normalize_tytul_naukowy(tytul))\n\n\ndef matchuj_funkcja_autora(funkcja_autora: str) ->Funkcja_Autora:\n funkcja_autora = normalize_funkcja_autora(funkcja_autora)\n return Funkcja_Autora.objects.get(Q(nazwa__iexact=funkcja_autora) | Q(\n skrot__iexact=funkcja_autora))\n\n\ndef matchuj_grupa_pracownicza(grupa_pracownicza: str) ->Grupa_Pracownicza:\n grupa_pracownicza = normalize_grupa_pracownicza(grupa_pracownicza)\n return Grupa_Pracownicza.objects.get(nazwa__iexact=grupa_pracownicza)\n\n\ndef matchuj_wymiar_etatu(wymiar_etatu: str) ->Wymiar_Etatu:\n wymiar_etatu = normalize_wymiar_etatu(wymiar_etatu)\n return Wymiar_Etatu.objects.get(nazwa__iexact=wymiar_etatu)\n\n\ndef matchuj_jednostke(nazwa, wydzial=None):\n nazwa = 
normalize_nazwa_jednostki(nazwa)\n try:\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa))\n except Jednostka.DoesNotExist:\n if nazwa.endswith('.'):\n nazwa = nazwa[:-1].strip()\n try:\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__istartswith=nazwa) | Q(\n skrot__istartswith=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(\n skrot__iexact=nazwa), Q(wydzial__nazwa__iexact=wydzial))\n\n\ndef matchuj_autora(imiona: str, nazwisko: str, jednostka: Union[Jednostka,\n None]=None, bpp_id: Union[int, None]=None, pbn_uid_id: Union[str, None]\n =None, system_kadrowy_id: Union[int, None]=None, pbn_id: Union[int,\n None]=None, orcid: Union[str, None]=None, tytul_str: Union[Tytul, None]\n =None):\n if bpp_id is not None:\n try:\n return Autor.objects.get(pk=bpp_id)\n except Autor.DoesNotExist:\n pass\n if orcid:\n try:\n return Autor.objects.get(orcid__iexact=orcid.strip())\n except Autor.DoesNotExist:\n pass\n if pbn_uid_id is not None and pbn_uid_id.strip() != '':\n _qset = Autor.objects.filter(pbn_uid_id=pbn_uid_id)\n if _qset.exists():\n return _qset.first()\n if system_kadrowy_id is not None:\n try:\n int(system_kadrowy_id)\n except (TypeError, ValueError):\n system_kadrowy_id = None\n if system_kadrowy_id is not None:\n try:\n return Autor.objects.get(system_kadrowy_id=system_kadrowy_id)\n except Autor.DoesNotExist:\n pass\n if pbn_id is not None:\n if isinstance(pbn_id, str):\n pbn_id = pbn_id.strip()\n try:\n pbn_id = int(pbn_id)\n except (TypeError, ValueError):\n pbn_id = None\n if pbn_id is not None:\n try:\n return Autor.objects.get(pbn_id=pbn_id)\n except Autor.DoesNotExist:\n pass\n queries = [Q(Q(nazwisko__iexact=nazwisko.strip()) | 
Q(\n poprzednie_nazwiska__icontains=nazwisko.strip()), imiona__iexact=\n imiona.strip())]\n if tytul_str:\n queries.append(queries[0] & Q(tytul__skrot=tytul_str))\n for qry in queries:\n try:\n return Autor.objects.get(qry)\n except (Autor.DoesNotExist, Autor.MultipleObjectsReturned):\n pass\n try:\n return Autor.objects.get(qry & Q(aktualna_jednostka=jednostka))\n except (Autor.MultipleObjectsReturned, Autor.DoesNotExist):\n pass\n if jednostka:\n queries = [Q(Q(autor__nazwisko__iexact=nazwisko.strip()) | Q(\n autor__poprzednie_nazwiska__icontains=nazwisko.strip()),\n autor__imiona__iexact=imiona.strip())]\n if tytul_str:\n queries.append(queries[0] & Q(autor__tytul__skrot=tytul_str))\n for qry in queries:\n try:\n return jednostka.autor_jednostka_set.get(qry).autor\n except (Autor_Jednostka.MultipleObjectsReturned,\n Autor_Jednostka.DoesNotExist):\n pass\n return None\n\n\ndef matchuj_zrodlo(s: Union[str, None], issn: Union[str, None]=None, e_issn:\n Union[str, None]=None, alt_nazwa=None) ->Union[None, Zrodlo]:\n if s is None or str(s) == '':\n return\n if issn is not None:\n try:\n return Zrodlo.objects.get(issn=issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n if e_issn is not None:\n try:\n return Zrodlo.objects.get(e_issn=e_issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n for elem in (s, alt_nazwa):\n if elem is None:\n continue\n elem = normalize_tytul_zrodla(elem)\n try:\n return Zrodlo.objects.get(Q(nazwa__iexact=elem) | Q(\n skrot__iexact=elem))\n except Zrodlo.MultipleObjectsReturned:\n pass\n except Zrodlo.DoesNotExist:\n if elem.endswith('.'):\n try:\n return Zrodlo.objects.get(Q(nazwa__istartswith=elem[:-1\n ]) | Q(skrot__istartswith=elem[:-1]))\n except Zrodlo.DoesNotExist:\n pass\n except Zrodlo.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_dyscypline(kod, nazwa):\n nazwa = normalize_nazwa_dyscypliny(nazwa)\n try:\n return Dyscyplina_Naukowa.objects.get(nazwa=nazwa)\n except 
Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n kod = normalize_kod_dyscypliny(kod)\n try:\n return Dyscyplina_Naukowa.objects.get(kod=kod)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_wydawce(nazwa, pbn_uid_id=None, similarity=0.9):\n nazwa = normalize_nazwa_wydawcy(nazwa)\n try:\n return Wydawca.objects.get(nazwa=nazwa, alias_dla_id=None)\n except Wydawca.DoesNotExist:\n pass\n if pbn_uid_id is not None:\n try:\n return Wydawca.objects.get(pbn_uid_id=pbn_uid_id)\n except Wydawca.DoesNotExist:\n pass\n loose = Wydawca.objects.annotate(similarity=TrigramSimilarity('nazwa',\n nazwa)).filter(similarity__gte=similarity).order_by('-similarity')[:5]\n if loose.count() > 0 and loose.count() < 2:\n return loose.first()\n\n\nTITLE_LIMIT_SINGLE_WORD = 15\nTITLE_LIMIT_MANY_WORDS = 25\nMATCH_SIMILARITY_THRESHOLD = 0.95\nMATCH_SIMILARITY_THRESHOLD_LOW = 0.9\nMATCH_SIMILARITY_THRESHOLD_VERY_LOW = 0.8\nnormalized_db_title = Trim(Replace(Replace(Lower('tytul_oryginalny'), Value\n (' [online]'), Value('')), Value(' '), Value(' ')))\nnormalized_db_zrodlo_skrot = Trim(Replace(Replace(Replace(Lower('skrot'),\n Value(' '), Value('')), Value('-'), Value('')), Value('.'), Value('')))\n\n\ndef normalize_zrodlo_skrot_for_db_lookup(s):\n return s.lower().replace(' ', '').strip().replace('-', '').replace('.', '')\n\n\nnormalized_db_zrodlo_nazwa = Trim(Replace(Lower('nazwa'), Value(' '), Value\n ('')))\n\n\ndef normalize_zrodlo_nazwa_for_db_lookup(s):\n return s.lower().replace(' ', '').strip()\n\n\nnormalized_db_isbn = Trim(Replace(Lower('isbn'), Value('-'), Value('')))\n\n\ndef matchuj_publikacje(klass: [Wydawnictwo_Zwarte, Wydawnictwo_Ciagle,\n Rekord], title, year, doi=None, public_uri=None, isbn=None, zrodlo=None,\n DEBUG_MATCHOWANIE=False, isbn_matchuj_tylko_nadrzedne=True,\n doi_matchuj_tylko_nadrzedne=True):\n if doi is not None:\n doi = 
normalize_doi(doi)\n if doi:\n zapytanie = klass.objects.filter(doi__istartswith=doi, rok=year)\n if doi_matchuj_tylko_nadrzedne:\n if hasattr(klass, 'wydawnictwo_nadrzedne_id'):\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n res = zapytanie.annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[\n :2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first(\n ).podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n title = normalize_tytul_publikacji(title)\n title_has_spaces = False\n if title is not None:\n title_has_spaces = title.find(' ') > 0\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n if zrodlo is not None and hasattr(klass, 'zrodlo'):\n try:\n return klass.objects.get(tytul_oryginalny__istartswith=\n title, rok=year, zrodlo=zrodlo)\n except klass.DoesNotExist:\n pass\n except klass.MultipleObjectsReturned:\n print(\n f'PPP ZZZ MultipleObjectsReturned dla title={title} rok={year} zrodlo={zrodlo}'\n )\n if isbn is not None and isbn != '' and hasattr(klass, 'isbn') and hasattr(\n klass, 'e_isbn'):\n ni = normalize_isbn(isbn)\n zapytanie = klass.objects.exclude(isbn=None, e_isbn=None).exclude(isbn\n ='', e_isbn='')\n if isbn_matchuj_tylko_nadrzedne:\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n if klass == Rekord:\n zapytanie = zapytanie.filter(pk__in=[(ContentType.objects.\n get_for_model(Wydawnictwo_Zwarte).pk, x) for x in\n Wydawnictwo_Zwarte.objects.\n wydawnictwa_nadrzedne_dla_innych()])\n elif klass == Wydawnictwo_Zwarte:\n zapytanie = zapytanie.filter(pk__in=Wydawnictwo_Zwarte.\n objects.wydawnictwa_nadrzedne_dla_innych())\n else:\n raise NotImplementedError(\n 'Matchowanie po ISBN dla czegoś innego niż wydawnictwo zwarte nie opracowane'\n )\n res = zapytanie.filter(Q(isbn=ni) | Q(e_isbn=ni)).annotate(podobienstwo\n 
=TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n public_uri = normalize_public_uri(public_uri)\n if public_uri:\n res = klass.objects.filter(Q(www=public_uri) | Q(public_www=public_uri)\n ).annotate(podobienstwo=TrigramSimilarity(normalized_db_title,\n title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n if title is not None and (not title_has_spaces and len(title) >=\n TITLE_LIMIT_SINGLE_WORD or title_has_spaces and len(title) >=\n TITLE_LIMIT_MANY_WORDS):\n res = klass.objects.filter(tytul_oryginalny__istartswith=title, rok\n =year).annotate(podobienstwo=TrigramSimilarity(\n normalized_db_title, title.lower())).order_by('-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n res = klass.objects.filter(rok=year).annotate(podobienstwo=\n TrigramSimilarity(normalized_db_title, title.lower())).order_by(\n '-podobienstwo')[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_LOW:\n return res.first()\n",
"step-5": "from typing import Union\n\nfrom django.db.models import Q, Value\nfrom django.db.models.functions import Lower, Replace, Trim\n\nfrom .normalization import (\n normalize_doi,\n normalize_funkcja_autora,\n normalize_grupa_pracownicza,\n normalize_isbn,\n normalize_kod_dyscypliny,\n normalize_nazwa_dyscypliny,\n normalize_nazwa_jednostki,\n normalize_nazwa_wydawcy,\n normalize_public_uri,\n normalize_tytul_naukowy,\n normalize_tytul_publikacji,\n normalize_tytul_zrodla,\n normalize_wymiar_etatu,\n)\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.postgres.search import TrigramSimilarity\n\nfrom bpp.models import (\n Autor,\n Autor_Jednostka,\n Dyscyplina_Naukowa,\n Funkcja_Autora,\n Grupa_Pracownicza,\n Jednostka,\n Rekord,\n Tytul,\n Wydawca,\n Wydawnictwo_Ciagle,\n Wydawnictwo_Zwarte,\n Wydzial,\n Wymiar_Etatu,\n Zrodlo,\n)\nfrom bpp.util import fail_if_seq_scan\n\n\ndef matchuj_wydzial(nazwa):\n try:\n return Wydzial.objects.get(nazwa__iexact=nazwa.strip())\n except Wydzial.DoesNotExist:\n pass\n\n\ndef matchuj_tytul(tytul: str, create_if_not_exist=False) -> Tytul:\n \"\"\"\n Dostaje tytuł: pełną nazwę albo skrót\n \"\"\"\n\n try:\n return Tytul.objects.get(nazwa__iexact=tytul)\n except (Tytul.DoesNotExist, Tytul.MultipleObjectsReturned):\n return Tytul.objects.get(skrot=normalize_tytul_naukowy(tytul))\n\n\ndef matchuj_funkcja_autora(funkcja_autora: str) -> Funkcja_Autora:\n funkcja_autora = normalize_funkcja_autora(funkcja_autora)\n return Funkcja_Autora.objects.get(\n Q(nazwa__iexact=funkcja_autora) | Q(skrot__iexact=funkcja_autora)\n )\n\n\ndef matchuj_grupa_pracownicza(grupa_pracownicza: str) -> Grupa_Pracownicza:\n grupa_pracownicza = normalize_grupa_pracownicza(grupa_pracownicza)\n return Grupa_Pracownicza.objects.get(nazwa__iexact=grupa_pracownicza)\n\n\ndef matchuj_wymiar_etatu(wymiar_etatu: str) -> Wymiar_Etatu:\n wymiar_etatu = normalize_wymiar_etatu(wymiar_etatu)\n return 
Wymiar_Etatu.objects.get(nazwa__iexact=wymiar_etatu)\n\n\ndef matchuj_jednostke(nazwa, wydzial=None):\n nazwa = normalize_nazwa_jednostki(nazwa)\n\n try:\n return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(skrot__iexact=nazwa))\n except Jednostka.DoesNotExist:\n if nazwa.endswith(\".\"):\n nazwa = nazwa[:-1].strip()\n\n try:\n return Jednostka.objects.get(\n Q(nazwa__istartswith=nazwa) | Q(skrot__istartswith=nazwa)\n )\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n\n return Jednostka.objects.get(\n Q(nazwa__istartswith=nazwa) | Q(skrot__istartswith=nazwa),\n Q(wydzial__nazwa__iexact=wydzial),\n )\n\n except Jednostka.MultipleObjectsReturned as e:\n if wydzial is None:\n raise e\n\n return Jednostka.objects.get(\n Q(nazwa__iexact=nazwa) | Q(skrot__iexact=nazwa),\n Q(wydzial__nazwa__iexact=wydzial),\n )\n\n\ndef matchuj_autora(\n imiona: str,\n nazwisko: str,\n jednostka: Union[Jednostka, None] = None,\n bpp_id: Union[int, None] = None,\n pbn_uid_id: Union[str, None] = None,\n system_kadrowy_id: Union[int, None] = None,\n pbn_id: Union[int, None] = None,\n orcid: Union[str, None] = None,\n tytul_str: Union[Tytul, None] = None,\n):\n if bpp_id is not None:\n try:\n return Autor.objects.get(pk=bpp_id)\n except Autor.DoesNotExist:\n pass\n\n if orcid:\n try:\n return Autor.objects.get(orcid__iexact=orcid.strip())\n except Autor.DoesNotExist:\n pass\n\n if pbn_uid_id is not None and pbn_uid_id.strip() != \"\":\n # Może być > 1 autor z takim pbn_uid_id\n _qset = Autor.objects.filter(pbn_uid_id=pbn_uid_id)\n if _qset.exists():\n return _qset.first()\n\n if system_kadrowy_id is not None:\n try:\n int(system_kadrowy_id)\n except (TypeError, ValueError):\n system_kadrowy_id = None\n\n if system_kadrowy_id is not None:\n try:\n return Autor.objects.get(system_kadrowy_id=system_kadrowy_id)\n except Autor.DoesNotExist:\n pass\n\n if pbn_id is not None:\n if isinstance(pbn_id, str):\n pbn_id = pbn_id.strip()\n\n try:\n pbn_id = 
int(pbn_id)\n except (TypeError, ValueError):\n pbn_id = None\n\n if pbn_id is not None:\n try:\n return Autor.objects.get(pbn_id=pbn_id)\n except Autor.DoesNotExist:\n pass\n\n queries = [\n Q(\n Q(nazwisko__iexact=nazwisko.strip())\n | Q(poprzednie_nazwiska__icontains=nazwisko.strip()),\n imiona__iexact=imiona.strip(),\n )\n ]\n\n if tytul_str:\n queries.append(queries[0] & Q(tytul__skrot=tytul_str))\n\n for qry in queries:\n try:\n return Autor.objects.get(qry)\n except (Autor.DoesNotExist, Autor.MultipleObjectsReturned):\n pass\n\n try:\n return Autor.objects.get(qry & Q(aktualna_jednostka=jednostka))\n except (Autor.MultipleObjectsReturned, Autor.DoesNotExist):\n pass\n\n # Jesteśmy tutaj. Najwyraźniej poszukiwanie po aktualnej jednostce, imieniu, nazwisku,\n # tytule itp nie bardzo się powiodło. Spróbujmy innej strategii -- jednostka jest\n # określona, poszukajmy w jej autorach. Wszak nie musi być ta jednostka jednostką\n # aktualną...\n\n if jednostka:\n\n queries = [\n Q(\n Q(autor__nazwisko__iexact=nazwisko.strip())\n | Q(autor__poprzednie_nazwiska__icontains=nazwisko.strip()),\n autor__imiona__iexact=imiona.strip(),\n )\n ]\n if tytul_str:\n queries.append(queries[0] & Q(autor__tytul__skrot=tytul_str))\n\n for qry in queries:\n try:\n return jednostka.autor_jednostka_set.get(qry).autor\n except (\n Autor_Jednostka.MultipleObjectsReturned,\n Autor_Jednostka.DoesNotExist,\n ):\n pass\n\n return None\n\n\ndef matchuj_zrodlo(\n s: Union[str, None],\n issn: Union[str, None] = None,\n e_issn: Union[str, None] = None,\n alt_nazwa=None,\n) -> Union[None, Zrodlo]:\n if s is None or str(s) == \"\":\n return\n\n if issn is not None:\n try:\n return Zrodlo.objects.get(issn=issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n\n if e_issn is not None:\n try:\n return Zrodlo.objects.get(e_issn=e_issn)\n except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):\n pass\n\n for elem in s, alt_nazwa:\n if elem is None:\n continue\n\n elem = 
normalize_tytul_zrodla(elem)\n try:\n return Zrodlo.objects.get(Q(nazwa__iexact=elem) | Q(skrot__iexact=elem))\n except Zrodlo.MultipleObjectsReturned:\n pass\n except Zrodlo.DoesNotExist:\n if elem.endswith(\".\"):\n try:\n return Zrodlo.objects.get(\n Q(nazwa__istartswith=elem[:-1])\n | Q(skrot__istartswith=elem[:-1])\n )\n except Zrodlo.DoesNotExist:\n pass\n except Zrodlo.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_dyscypline(kod, nazwa):\n nazwa = normalize_nazwa_dyscypliny(nazwa)\n try:\n return Dyscyplina_Naukowa.objects.get(nazwa=nazwa)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n\n kod = normalize_kod_dyscypliny(kod)\n try:\n return Dyscyplina_Naukowa.objects.get(kod=kod)\n except Dyscyplina_Naukowa.DoesNotExist:\n pass\n except Dyscyplina_Naukowa.MultipleObjectsReturned:\n pass\n\n\ndef matchuj_wydawce(nazwa, pbn_uid_id=None, similarity=0.9):\n nazwa = normalize_nazwa_wydawcy(nazwa)\n try:\n return Wydawca.objects.get(nazwa=nazwa, alias_dla_id=None)\n except Wydawca.DoesNotExist:\n pass\n\n if pbn_uid_id is not None:\n\n try:\n return Wydawca.objects.get(pbn_uid_id=pbn_uid_id)\n except Wydawca.DoesNotExist:\n pass\n\n loose = (\n Wydawca.objects.annotate(similarity=TrigramSimilarity(\"nazwa\", nazwa))\n .filter(similarity__gte=similarity)\n .order_by(\"-similarity\")[:5]\n )\n if loose.count() > 0 and loose.count() < 2:\n return loose.first()\n\n\nTITLE_LIMIT_SINGLE_WORD = 15\nTITLE_LIMIT_MANY_WORDS = 25\n\nMATCH_SIMILARITY_THRESHOLD = 0.95\nMATCH_SIMILARITY_THRESHOLD_LOW = 0.90\nMATCH_SIMILARITY_THRESHOLD_VERY_LOW = 0.80\n\n# Znormalizowany tytuł w bazie danych -- wyrzucony ciąg znaków [online], podwójne\n# spacje pozamieniane na pojedyncze, trim całości\nnormalized_db_title = Trim(\n Replace(\n Replace(Lower(\"tytul_oryginalny\"), Value(\" [online]\"), Value(\"\")),\n Value(\" \"),\n Value(\" \"),\n )\n)\n\n# Znormalizowany skrót nazwy źródła -- wyrzucone spacje i kropki, trim, 
zmniejszone\n# znaki\nnormalized_db_zrodlo_skrot = Trim(\n Replace(\n Replace(\n Replace(Lower(\"skrot\"), Value(\" \"), Value(\"\")),\n Value(\"-\"),\n Value(\"\"),\n ),\n Value(\".\"),\n Value(\"\"),\n )\n)\n\n\ndef normalize_zrodlo_skrot_for_db_lookup(s):\n return s.lower().replace(\" \", \"\").strip().replace(\"-\", \"\").replace(\".\", \"\")\n\n\n# Znormalizowany skrot zrodla do wyszukiwania -- wyrzucone wszystko procz kropek\nnormalized_db_zrodlo_nazwa = Trim(\n Replace(Lower(\"nazwa\"), Value(\" \"), Value(\"\")),\n)\n\n\ndef normalize_zrodlo_nazwa_for_db_lookup(s):\n return s.lower().replace(\" \", \"\").strip()\n\n\nnormalized_db_isbn = Trim(Replace(Lower(\"isbn\"), Value(\"-\"), Value(\"\")))\n\n\ndef matchuj_publikacje(\n klass: [Wydawnictwo_Zwarte, Wydawnictwo_Ciagle, Rekord],\n title,\n year,\n doi=None,\n public_uri=None,\n isbn=None,\n zrodlo=None,\n DEBUG_MATCHOWANIE=False,\n isbn_matchuj_tylko_nadrzedne=True,\n doi_matchuj_tylko_nadrzedne=True,\n):\n\n if doi is not None:\n doi = normalize_doi(doi)\n if doi:\n zapytanie = klass.objects.filter(doi__istartswith=doi, rok=year)\n\n if doi_matchuj_tylko_nadrzedne:\n if hasattr(klass, \"wydawnictwo_nadrzedne_id\"):\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n\n res = zapytanie.annotate(\n podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())\n ).order_by(\"-podobienstwo\")[:2]\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n\n title = normalize_tytul_publikacji(title)\n\n title_has_spaces = False\n\n if title is not None:\n title_has_spaces = title.find(\" \") > 0\n\n if title is not None and (\n (not title_has_spaces and len(title) >= TITLE_LIMIT_SINGLE_WORD)\n or (title_has_spaces and len(title) >= TITLE_LIMIT_MANY_WORDS)\n ):\n if zrodlo is not None and hasattr(klass, \"zrodlo\"):\n try:\n return klass.objects.get(\n tytul_oryginalny__istartswith=title, rok=year, 
zrodlo=zrodlo\n )\n except klass.DoesNotExist:\n pass\n except klass.MultipleObjectsReturned:\n print(\n f\"PPP ZZZ MultipleObjectsReturned dla title={title} rok={year} zrodlo={zrodlo}\"\n )\n\n if (\n isbn is not None\n and isbn != \"\"\n and hasattr(klass, \"isbn\")\n and hasattr(klass, \"e_isbn\")\n ):\n ni = normalize_isbn(isbn)\n\n zapytanie = klass.objects.exclude(isbn=None, e_isbn=None).exclude(\n isbn=\"\", e_isbn=\"\"\n )\n\n if isbn_matchuj_tylko_nadrzedne:\n zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)\n\n if klass == Rekord:\n zapytanie = zapytanie.filter(\n pk__in=[\n (ContentType.objects.get_for_model(Wydawnictwo_Zwarte).pk, x)\n for x in Wydawnictwo_Zwarte.objects.wydawnictwa_nadrzedne_dla_innych()\n ]\n )\n elif klass == Wydawnictwo_Zwarte:\n zapytanie = zapytanie.filter(\n pk__in=Wydawnictwo_Zwarte.objects.wydawnictwa_nadrzedne_dla_innych()\n )\n else:\n raise NotImplementedError(\n \"Matchowanie po ISBN dla czegoś innego niż wydawnictwo zwarte nie opracowane\"\n )\n\n #\n # Uwaga uwaga uwaga.\n #\n # Gdy matchujemy ISBN, to w BPP dochodzi do takiej nieciekawej sytuacji: wpisywany jest\n # ISBN zarówno dla rozdziałów jak i dla wydawnictw nadrzędnych.\n #\n # Zatem, na ten moment, aby usprawnić matchowanie ISBN, jeżeli ustawiona jest flaga\n # isbn_matchuj_tylko_nadrzedne, to system bedzie szukał tylko i wyłącznie wśród\n # rekordów będących wydawnictwami nadrzędnymi (czyli nie mającymi rekordów podrzędnych)\n #\n\n res = (\n zapytanie.filter(Q(isbn=ni) | Q(e_isbn=ni))\n .annotate(\n podobienstwo=TrigramSimilarity(\n normalized_db_title,\n title.lower(),\n )\n )\n .order_by(\"-podobienstwo\")[:2]\n )\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:\n return res.first()\n\n public_uri = normalize_public_uri(public_uri)\n if public_uri:\n res = (\n klass.objects.filter(Q(www=public_uri) | Q(public_www=public_uri))\n .annotate(\n 
podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())\n )\n .order_by(\"-podobienstwo\")[:2]\n )\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n\n if title is not None and (\n (not title_has_spaces and len(title) >= TITLE_LIMIT_SINGLE_WORD)\n or (title_has_spaces and len(title) >= TITLE_LIMIT_MANY_WORDS)\n ):\n res = (\n klass.objects.filter(tytul_oryginalny__istartswith=title, rok=year)\n .annotate(\n podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())\n )\n .order_by(\"-podobienstwo\")[:2]\n )\n\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:\n return res.first()\n\n # Ostatnia szansa, po podobieństwie, niski próg\n\n res = (\n klass.objects.filter(rok=year)\n .annotate(\n podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())\n )\n .order_by(\"-podobienstwo\")[:2]\n )\n\n fail_if_seq_scan(res, DEBUG_MATCHOWANIE)\n if res.exists():\n if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_LOW:\n return res.first()\n",
"step-ids": [
11,
12,
13,
15,
16
]
}
|
[
11,
12,
13,
15,
16
] |
import datetime
from pathlib import Path

from loguru import logger
from psycopg2 import IntegrityError, ProgrammingError

from db.connect import open_connection, open_cursor
# Derive the log file name from this module's file name (e.g. "models" for
# "models.py").  Path.stem is portable, unlike splitting on "/" which breaks
# on Windows path separators.
_log_file_name = Path(__file__).stem
logger.add(f"logs/{_log_file_name}.log", rotation="1 day")
class DataTypeSaveError(Exception):
    """Raised when persisting an instance to the database fails outright."""
class TypeValidationError(Exception):
    """Raised when an attribute value cannot be coerced to its annotated type."""
class MultipleRowsError(Exception):
    """Raised when a single-row lookup matches more than one database row."""
class DoesNotExist(Exception):
    """Raised when a lookup matches no database row at all."""
type_map = {str: "%s", int: "%d", float: "%f"}
class BaseDataClass:
    """Minimal active-record style base class for rows persisted via psycopg2.

    Subclasses are expected to provide:
      * ``_table_name`` -- the backing database table name,
      * ``_ignore_fields`` -- names exempt from type validation,
      * class-level type annotations describing the columns (used by
        :meth:`validate`).

    Instance attributes whose names do not start with an underscore are
    treated as column values; ``id`` is assumed to be the auto-incrementing
    primary key.
    """

    def _create_insert_query(self):
        """Build a parameterized INSERT statement for this instance.

        Private attributes (leading underscore) are skipped, and a ``None``
        ``id`` is omitted so the database sequence assigns the key.

        Returns:
            tuple[str, list]: SQL text with ``%s`` placeholders and the
            positional values to bind.
        """
        column_names = []
        placeholders = []
        values = []
        for column_name, row_value in self.__dict__.items():
            if column_name.startswith("_"):
                # Private attributes are not database columns.
                continue
            if column_name == "id" and row_value is None:
                # If id is None, leave it to the db to deal with incrementing the pk.
                continue
            column_names.append(str(column_name))
            placeholders.append("%s")
            values.append(row_value)

        columns = "(" + ", ".join(column_names) + ")"
        values_reprs = "(" + ", ".join(placeholders) + ")"

        # NOTE: the table and column names are interpolated as identifiers;
        # they must come from trusted code, never from user input.
        query = f"INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;"
        return query, values

    @classmethod
    def _create_select_query(cls, **kwargs):
        """Build a parameterized SELECT filtered by the given column values.

        ``None`` values and underscore-prefixed keys are skipped (``None`` is
        not translated into an ``IS NULL`` test; underscore keys are private
        attributes, which previously leaked into the WHERE clause when called
        via :meth:`get_id`).

        Values are bound as ``%s`` parameters instead of being interpolated
        into the SQL text, which closes the SQL-injection hole the previous
        f-string quoting had.

        Returns:
            tuple[str, list]: SQL text with ``%s`` placeholders and values.
        """
        conditions = []
        values = []
        for key, value in kwargs.items():
            if key.startswith("_") or value is None:
                continue
            conditions.append(f"{key} = %s")
            values.append(value)

        where_clause = " AND ".join(conditions)
        query = f"SELECT * FROM {cls._table_name} WHERE {where_clause};"
        return query, values

    def save(self, commit=True, with_get=True):
        """Store content to database.

        This should be thread safe by using asyncio's Lock in open_cursor.

        Args:
            commit: commit the transaction after a successful execute.
            with_get: fetch the RETURNING id and store it on ``self``.

        Returns:
            ``self`` -- also returned when an ``IntegrityError`` occurred;
            that error is deliberately logged and swallowed (best effort).

        Raises:
            DataTypeSaveError: if the statement itself is malformed
                (``ProgrammingError``).
        """
        self.validate()
        logger.debug(f"Save: {self}")
        query, values = self._create_insert_query()

        with open_connection() as conn:
            with open_cursor(conn) as cursor:
                try:
                    cursor.execute(query, tuple(values))
                    if with_get:
                        _id = cursor.fetchone()[0]
                        logger.debug(f"Saved value with id: {_id}")
                        self.id = _id or self.id
                        if not self.id:
                            logger.warning(f"Returned with an empty id. {self}")
                    if commit:
                        conn.commit()
                except ProgrammingError as e:
                    logger.error(e)
                    raise DataTypeSaveError
                except IntegrityError as e:
                    # Best-effort save: constraint violations are logged,
                    # not raised, so callers can continue.
                    logger.warning(f"Could not save: {self}")
                    logger.error(e)
        return self

    def clean(self):
        """Hook for subclass-specific normalization before save; logs only here."""
        logger.debug(f"Cleaning: {self}")

    def validate(self):
        """Coerce instance attributes to their annotated types, in place.

        ``id`` may be ``None`` (the DB assigns it) and names listed in
        ``_ignore_fields`` are skipped.

        Raises:
            TypeValidationError: if a value cannot be coerced to the
                annotated type.
        """
        annotations = self.__annotations__
        keys_ = annotations.keys()
        fields = self.__dict__
        for key in keys_:
            if not isinstance(fields[key], annotations[key]):
                if key == "id" and fields[key] is None:
                    # Pass None to id and allow the DB to increment it.
                    continue
                if key in self._ignore_fields:
                    continue
                try:
                    self.__dict__[key] = annotations[key](fields[key])
                except (TypeError, ValueError) as e:
                    logger.error(
                        f"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}."
                    )
                    logger.error(e)
                    raise TypeValidationError(
                        f"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}."
                    )

    @classmethod
    def prepare(cls, *args):
        """Hook for subclasses to transform constructor args; identity here."""
        return args

    @classmethod
    def create(cls, with_get=False, **kwargs):
        """Construct, clean, and save a new instance in one call."""
        inst = cls(**kwargs)
        inst.clean()
        inst.save(with_get=with_get)
        return inst

    @classmethod
    def _get_rows(cls, **kwargs):
        """Execute a filtered SELECT and return the raw rows."""
        logger.debug(f"{cls}._get_rows")
        query, values = cls._create_select_query(**kwargs)

        with open_connection() as conn:
            with open_cursor(conn) as cursor:
                cursor.execute(query, tuple(values))
                rows = cursor.fetchall()

        return rows

    @classmethod
    def all(cls, **kwargs):
        """Return every matching row as a list of instances."""
        logger.debug(f"Get all: {cls}")
        rows = cls._get_rows(**kwargs)
        instances = []
        for row in rows:
            instances.append(cls(*row))
        return instances

    @classmethod
    def get(cls, **kwargs):
        """Return exactly one matching instance.

        Raises:
            DoesNotExist: no row matches the filters.
            MultipleRowsError: more than one row matches.
        """
        logger.debug(f"Get: {cls}")
        rows = cls._get_rows(**kwargs)
        logger.debug(f"Rows: {rows}")

        if not rows:
            # Fixed: the message previously lacked its closing parenthesis.
            raise DoesNotExist(f"{cls}({kwargs})")

        if len(rows) > 1:
            raise MultipleRowsError(f"Got {len(rows)} entries in {cls}.get()")

        # fetchall() returns a list; keep the defensive branch for safety.
        row = rows[0] if isinstance(rows, list) else rows
        return cls(*row)

    def get_id(self):
        """Look up and return the database id of the row matching this instance."""
        logger.debug(f"Get own id: {self}.")
        return self.__class__.get(**self.__dict__).id
|
normal
|
{
"blob_id": "8339ac512d851ea20938a1fbeedcb751cb2b8a6a",
"index": 4337,
"step-1": "<mask token>\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. 
{self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. 
{self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f'{cls}._get_rows')\n query = cls._create_select_query(**kwargs)\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f'Get all: {cls}')\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f'Get: {cls}')\n rows = cls._get_rows(**kwargs)\n logger.debug(f'Rows: {rows}')\n if not rows:\n raise DoesNotExist(f'{cls}({kwargs}')\n if len(rows) > 1:\n raise MultipleRowsError(f'Got {len(rows)} entries in {cls}.get()')\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n return cls(*row)\n\n def get_id(self):\n logger.debug(f'Get 
own id: {self}.')\n return self.__class__.get(**self.__dict__).id\n",
"step-3": "<mask token>\n_log_file_name = __file__.split('/')[-1].split('.')[0]\nlogger.add(f'logs/{_log_file_name}.log', rotation='1 day')\n\n\nclass DataTypeSaveError(Exception):\n pass\n\n\nclass TypeValidationError(Exception):\n pass\n\n\nclass MultipleRowsError(Exception):\n pass\n\n\nclass DoesNotExist(Exception):\n pass\n\n\ntype_map = {str: '%s', int: '%d', float: '%f'}\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. 
{self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f'{cls}._get_rows')\n query = cls._create_select_query(**kwargs)\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f'Get all: {cls}')\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f'Get: {cls}')\n rows = cls._get_rows(**kwargs)\n logger.debug(f'Rows: {rows}')\n if not rows:\n raise DoesNotExist(f'{cls}({kwargs}')\n if len(rows) > 1:\n raise MultipleRowsError(f'Got {len(rows)} entries in {cls}.get()')\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n return cls(*row)\n\n def get_id(self):\n logger.debug(f'Get 
own id: {self}.')\n return self.__class__.get(**self.__dict__).id\n",
"step-4": "from psycopg2 import ProgrammingError, IntegrityError\nimport datetime\nfrom loguru import logger\nfrom db.connect import open_cursor, open_connection\n_log_file_name = __file__.split('/')[-1].split('.')[0]\nlogger.add(f'logs/{_log_file_name}.log', rotation='1 day')\n\n\nclass DataTypeSaveError(Exception):\n pass\n\n\nclass TypeValidationError(Exception):\n pass\n\n\nclass MultipleRowsError(Exception):\n pass\n\n\nclass DoesNotExist(Exception):\n pass\n\n\ntype_map = {str: '%s', int: '%d', float: '%f'}\n\n\nclass BaseDataClass:\n\n def _create_insert_query(self):\n column_names = ''\n row_values = ''\n values = []\n for column_name, row_value in self.__dict__.items():\n if column_name.startswith('_'):\n continue\n if column_name == 'id' and row_value is None:\n continue\n column_names += str(column_name) + ', '\n row_values += '%s, '\n values.append(row_value)\n columns = '(' + column_names[:-2] + ')'\n values_reprs = '(' + row_values[:-2] + ')'\n query = (\n f'INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;'\n )\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n key_value_pairs = ''\n for key, value in kwargs.items():\n if value is None:\n continue\n key_value_pairs += f\"{key} = '{value}' AND \"\n key_value_pairs = key_value_pairs[:-5]\n query = f'SELECT * FROM {cls._table_name} WHERE {key_value_pairs};'\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f'Save: {self}')\n query, values = self._create_insert_query()\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n logger.debug(f'Saved value with id: {_id}')\n self.id = _id or self.id\n if not self.id:\n logger.warning(f'Returned with an empty id. 
{self}'\n )\n if commit:\n conn.commit()\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f'Could not save: {self}')\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f'Cleaning: {self}')\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == 'id' and fields[key] is None:\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n logger.error(e)\n raise TypeValidationError(\n f'Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.'\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f'{cls}._get_rows')\n query = cls._create_select_query(**kwargs)\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f'Get all: {cls}')\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f'Get: {cls}')\n rows = cls._get_rows(**kwargs)\n logger.debug(f'Rows: {rows}')\n if not rows:\n raise DoesNotExist(f'{cls}({kwargs}')\n if len(rows) > 1:\n raise MultipleRowsError(f'Got {len(rows)} entries in {cls}.get()')\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n return cls(*row)\n\n def get_id(self):\n logger.debug(f'Get 
own id: {self}.')\n return self.__class__.get(**self.__dict__).id\n",
"step-5": "from psycopg2 import ProgrammingError, IntegrityError\nimport datetime\nfrom loguru import logger\n\nfrom db.connect import open_cursor, open_connection\n\n\n_log_file_name = __file__.split(\"/\")[-1].split(\".\")[0]\nlogger.add(f\"logs/{_log_file_name}.log\", rotation=\"1 day\")\n\n\nclass DataTypeSaveError(Exception):\n pass\n\n\nclass TypeValidationError(Exception):\n pass\n\n\nclass MultipleRowsError(Exception):\n pass\n\n\nclass DoesNotExist(Exception):\n pass\n\n\ntype_map = {str: \"%s\", int: \"%d\", float: \"%f\"}\n\n\nclass BaseDataClass:\n def _create_insert_query(self):\n\n column_names = \"\"\n row_values = \"\"\n values = []\n for column_name, row_value in self.__dict__.items():\n\n if column_name.startswith(\"_\"):\n continue\n\n if column_name == \"id\" and row_value is None:\n # If id is None, leave it to the db to deal with incrementing the pk.\n continue\n\n column_names += str(column_name) + \", \"\n row_values += \"%s, \"\n values.append(row_value)\n\n columns = \"(\" + column_names[:-2] + \")\"\n values_reprs = \"(\" + row_values[:-2] + \")\"\n\n query = f\"INSERT INTO {self._table_name} {columns} VALUES {values_reprs} RETURNING id;\"\n\n return query, values\n\n @classmethod\n def _create_select_query(cls, **kwargs):\n\n key_value_pairs = \"\"\n for key, value in kwargs.items():\n\n if value is None:\n continue\n\n key_value_pairs += f\"{key} = '{value}' AND \"\n\n key_value_pairs = key_value_pairs[:-5]\n\n query = f\"SELECT * FROM {cls._table_name} WHERE {key_value_pairs};\"\n\n return query\n\n def save(self, commit=True, with_get=True):\n \"\"\"Store conent to database.\n This should be thread safe by using asyncio's Lock in open_cursor.\n\n\n \"\"\"\n self.validate()\n logger.debug(f\"Save: {self}\")\n query, values = self._create_insert_query()\n\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n try:\n\n cursor.execute(query, tuple(values))\n if with_get:\n _id = cursor.fetchone()[0]\n 
logger.debug(f\"Saved value with id: {_id}\")\n self.id = _id or self.id\n if not self.id:\n logger.warning(f\"Returned with an empty id. {self}\")\n if commit:\n conn.commit()\n\n except ProgrammingError as e:\n logger.error(e)\n raise DataTypeSaveError\n except IntegrityError as e:\n logger.warning(f\"Could not save: {self}\")\n logger.error(e)\n return self\n\n def clean(self):\n logger.debug(f\"Cleaning: {self}\")\n\n def validate(self):\n annotations = self.__annotations__\n keys_ = annotations.keys()\n fields = self.__dict__\n for key in keys_:\n if not isinstance(fields[key], annotations[key]):\n if key == \"id\" and fields[key] is None:\n # Pass None to id and allow the DB to increment it.\n continue\n if key in self._ignore_fields:\n continue\n try:\n self.__dict__[key] = annotations[key](fields[key])\n except (TypeError, ValueError) as e:\n logger.error(\n f\"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.\"\n )\n logger.error(e)\n raise TypeValidationError(\n f\"Encountered wrong type for {key}, got {type(fields[key])} but expected: {annotations[key]}.\"\n )\n\n @classmethod\n def prepare(cls, *args):\n return args\n\n @classmethod\n def create(cls, with_get=False, **kwargs):\n inst = cls(**kwargs)\n inst.clean()\n inst.save(with_get=with_get)\n return inst\n\n @classmethod\n def _get_rows(cls, **kwargs):\n logger.debug(f\"{cls}._get_rows\")\n query = cls._create_select_query(**kwargs)\n\n with open_connection() as conn:\n with open_cursor(conn) as cursor:\n cursor.execute(query)\n rows = cursor.fetchall()\n\n return rows\n\n @classmethod\n def all(cls, **kwargs):\n logger.debug(f\"Get all: {cls}\")\n rows = cls._get_rows(**kwargs)\n instances = []\n for row in rows:\n instances.append(cls(*row))\n\n return instances\n\n @classmethod\n def get(cls, **kwargs):\n logger.debug(f\"Get: {cls}\")\n rows = cls._get_rows(**kwargs)\n logger.debug(f\"Rows: {rows}\")\n\n if not rows:\n raise 
DoesNotExist(f\"{cls}({kwargs}\")\n\n if len(rows) > 1:\n raise MultipleRowsError(f\"Got {len(rows)} entries in {cls}.get()\")\n\n if isinstance(rows, list):\n row = rows[0]\n else:\n row = rows\n\n return cls(*row)\n\n def get_id(self):\n logger.debug(f\"Get own id: {self}.\")\n return self.__class__.get(**self.__dict__).id\n",
"step-ids": [
6,
12,
18,
19,
20
]
}
|
[
6,
12,
18,
19,
20
] |
# Read an integer shift from stdin and print a weekday.
# NOTE(review): 1030 presumably encodes a time in HHMM units and each shift
# step adds 100 (one hour) -- unverified; a result of exactly 0 falls through
# to 'Wednesday', confirm that edge is intended.
shift = input()
time_value = 1030 + int(shift) * 100
if 0 < time_value < 2400:
    day = 'Tuesday'
elif time_value < 0:
    day = 'Monday'
else:
    day = 'Wednesday'
print(day)
|
normal
|
{
"blob_id": "aefb49410e077180a660d17c4c646265a75969a7",
"index": 7509,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif 0 < cal < 2400:\n print('Tuesday')\nelif cal < 0:\n print('Monday')\nelse:\n print('Wednesday')\n",
"step-3": "offset = input()\ncal = 1030 + int(offset) * 100\nif 0 < cal < 2400:\n print('Tuesday')\nelif cal < 0:\n print('Monday')\nelse:\n print('Wednesday')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def bubble_sort(arr):
    """Sort ``arr`` in place with bubble sort and return the same list."""
    n = len(arr)
    for done in range(1, n):
        # After each pass the largest remaining item sits at index n - done.
        for k in range(n - done):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
    return arr
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def bubble_sort(arr):
    """Sort ``arr`` in place using bubble sort and return it.

    Improvement over the naive version: a pass that performs no swap proves
    the list is already sorted, so the scan stops early (best case O(n)
    instead of always O(n^2)).
    """
    for i in range(1, len(arr)):
        swapped = False
        for j in range(0, len(arr) - i):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            # No swap this pass -> already sorted.
            break
    return arr


if __name__ == '__main__':
    r1 = bubble_sort([0, 5, 3, 2, 9, 20, 6, 7, 3])
    print(r1)
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# author: MSJ
# date: 2021/3/11
# desc: bubble sort
def bubble_sort(arr):
    """Sort ``arr`` in place using bubble sort and return the same list."""
    for i in range(1, len(arr)):
        # Each outer pass floats the largest remaining element to index len(arr) - i.
        for j in range(0, len(arr) - i):
            if arr[j] > arr[j + 1]:
                # Swap adjacent out-of-order elements via a temporary.
                tmp = arr[j]
                arr[j] = arr[j + 1]
                arr[j + 1] = tmp
    return arr
if __name__ == '__main__':
    # Demo run on a sample list.
    r1 = bubble_sort([0, 5, 3, 2, 9, 20, 6, 7, 3])
    print(r1)
|
flexible
|
{
"blob_id": "6682c864a3da6f2c894a3a40359726b4eb97d040",
"index": 6109,
"step-1": "<mask token>\n",
"step-2": "def bubble_sort(arr):\n for i in range(1, len(arr)):\n for j in range(0, len(arr) - i):\n if arr[j] > arr[j + 1]:\n tmp = arr[j]\n arr[j] = arr[j + 1]\n arr[j + 1] = tmp\n return arr\n\n\n<mask token>\n",
"step-3": "def bubble_sort(arr):\n for i in range(1, len(arr)):\n for j in range(0, len(arr) - i):\n if arr[j] > arr[j + 1]:\n tmp = arr[j]\n arr[j] = arr[j + 1]\n arr[j + 1] = tmp\n return arr\n\n\nif __name__ == '__main__':\n r1 = bubble_sort([0, 5, 3, 2, 9, 20, 6, 7, 3])\n print(r1)\n",
"step-4": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# author: MSJ\n# date: 2021/3/11\n# desc:冒泡排序\n\n\ndef bubble_sort(arr):\n for i in range(1, len(arr)):\n for j in range(0, len(arr) - i):\n if arr[j] > arr[j + 1]:\n tmp = arr[j]\n arr[j] = arr[j + 1]\n arr[j + 1] = tmp\n\n return arr\n\n\nif __name__ == '__main__':\n r1 = bubble_sort([0, 5, 3, 2, 9, 20, 6, 7, 3])\n print(r1)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns += staticfiles_urlpatterns()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
appname = 'home'
urlpatterns = [path('', views.home, name='home')]
urlpatterns += staticfiles_urlpatterns()
<|reserved_special_token_1|>
from django.urls import path
from django.conf.urls import include, url
from . import views
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
appname = 'home'
urlpatterns = [path('', views.home, name='home')]
urlpatterns += staticfiles_urlpatterns()
<|reserved_special_token_1|>
# URL configuration for the `home` app.
from django.urls import path
from django.conf.urls import include, url
from . import views
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns

# NOTE(review): Django URL namespacing uses `app_name` (with underscore);
# `appname` as written has no effect — confirm whether a rename is intended.
# NOTE(review): `include`, `url` and `static` are imported but unused here.
appname = 'home'
urlpatterns = [
    # Root of the app maps to the home view.
    path('', views.home, name='home'),
]
# Append the staticfiles URL helpers so static assets are served in dev.
urlpatterns += staticfiles_urlpatterns()
|
flexible
|
{
"blob_id": "dd23cd068eea570fc187dad2d49b30376fbd4854",
"index": 4856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns += staticfiles_urlpatterns()\n",
"step-3": "<mask token>\nappname = 'home'\nurlpatterns = [path('', views.home, name='home')]\nurlpatterns += staticfiles_urlpatterns()\n",
"step-4": "from django.urls import path\nfrom django.conf.urls import include, url\nfrom . import views\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nappname = 'home'\nurlpatterns = [path('', views.home, name='home')]\nurlpatterns += staticfiles_urlpatterns()\n",
"step-5": "from django.urls import path\nfrom django.conf.urls import include, url\nfrom . import views\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nappname = 'home'\nurlpatterns = [\n path('', views.home, name='home'),\n]\nurlpatterns += staticfiles_urlpatterns()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from collections import namedtuple
import argparse
import pdb
import traceback
import sys
import os
from qca_hex_analyzer import WmiCtrlAnalyzer, HtcCtrlAnalyzer, HttAnalyzer, AllAnalyzer
import hexfilter
# Help/description text blocks consumed by the argparse setup in
# load_options() below: one program description plus a help/description
# pair per subcommand (wmi-ctrl, htc-ctrl, htt, all).
description = \
    "Tool used to analyze hexdumps produced by a qca wireless kernel " \
    "driver (such as ath6kl, ath10k or qcacld2.0). " \
    "The hexdumps are assumed to contain dumps of the traffic " \
    "between the driver and the target. " \
    "No special preprocessing of the log files is required. " \
    "Filter strings (description strings) can be used to limit the output " \
    "(only RX or TX etc.). " \
    "The driver must of course be configured to log all necessary debug " \
    "data (for ath6kl and ath10k this means a proper debug mask). "

wmi_ctrl_help = \
    "Subcommand for WMI control message parsing. " \
    "This subcommand is used to extract WMI control messages from the input. "

wmi_ctrl_description = \
    "Extracts WMI control message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output -file). " \
    "--ep-id is used to determine from which HTC endpoint the data will " \
    "be extracted (see description of that option below). " \
    "All valid WMI control message ID's will be printed together with the " \
    "message enum string (from ath6kl source code). " \
    "The --wmi-old option must be used if the driver does not use the WMI " \
    "unified protocol (ath6kl). " \
    "The WMI control message payload will also be printed together with " \
    "message ID's if the --print-data option is used."

htc_ctrl_help = \
    "Subcommand for HTC control message parsing. " \
    "This subcommand is used to extract HTC control messages from the input. "

htc_ctrl_description = \
    "Extracts HTC control message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output -file). " \
    "All valid HTC control message ID's will be printed together with the " \
    "message enum string (from ath6kl source code). " \
    "The message payload will also be printed together with the " \
    "message ID's if the --print-data option is used. " \
    "HTC control messages will always be extracted from endpoint 0."

htt_help = \
    "Subcommand for HTT message parsing. " \
    "This subcommand is used to extract HTT messages from the input. "

htt_description = \
    "Extracts HTT message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output -file). " \
    "--ep-id is used to determine from which HTC endpoint the data will " \
    "be extracted (see description of that option below). " \
    "All valid HTT message ID's will be printed together with the " \
    "message enum string (from ath10k source code). " \
    "The message payload will also be printed together with " \
    "message ID's if the --print-data option is used."

all_help = \
    "Subcommand for parsing of all supported message types. " \
    "This subcommand is used to extract both WMI control, " \
    "HTC control and HTT messages from the input. "

# NOTE(review): "WMI controli" below is a typo in the user-visible text
# ("control"); left untouched here since this edit only adds comments.
all_description = \
    "Extracts message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output-file). " \
    "The messages can be any of the supported message types " \
    "(currently only WMI controli, HTC control and HTT). " \
    "--wmi-ctrl-ep-id and --htt-ep-id is used to determine from which " \
    "endpoints WMI and HTT data will be extracted " \
    "(see description of those options below). " \
    "HTC control messages will always be extracted from ep 0. " \
    "All valid message ID's will be printed together " \
    "with a corresponding message enum string. " \
    "The message payload will also be printed together with " \
    "message ID's if the --print-data option is used."
def auto_int(x):
    """Parse *x* as an integer, auto-detecting the base from its prefix.

    Base 0 makes int() honour 0x/0o/0b prefixes, so this works as an
    argparse ``type=`` callable accepting hex, octal, binary or decimal.
    """
    value = int(x, base=0)
    return value
def load_options():
    """Parse command-line arguments into the module-global ``parsed_args``.

    Builds a base parser with the options shared by every subcommand
    (input/output files, timestamp handling, dump-description filters,
    data direction, HTC header size), then one subparser per analysis
    mode: ``wmi-ctrl``, ``htc-ctrl``, ``htt`` and ``all``.
    """
    global parsed_args

    # Options common to all subcommands; attached via parents=[...] below.
    base_parser = argparse.ArgumentParser(add_help=False)
    base_parser.add_argument('-i', '--input-file',
                             help="Input (log) file. If omitted, "
                             "stdin will be read.")
    base_parser.add_argument('-o', '--output-file',
                             help="Output file. If omitted, "
                             "the output will be written to stdout.")
    base_parser.add_argument('-n', '--no-timestamps', action="store_true",
                             help="Specifies whether or not the input file "
                             "contains timestamps. ")
    base_parser.add_argument('-d', '--desc-str', nargs='+', type=str,
                             help="Description string(s) of the dumps. "
                             "Only dumps with a prefix "
                             "matching any of the provided desc strings "
                             "will be analyzed. "
                             "If no --desc-str option is given, no "
                             "description filtering will be performed. "
                             "The prefix of a hexdump is the short "
                             "description string before the address "
                             "in each line of the dump, i.e the hexdump "
                             "prefix. "
                             "--desc-str is normally used to select "
                             "between RX and TX logs and should be "
                             "combined with a proper --data-direction "
                             "option.")
    base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,
                             help="This option is used to specify how the "
                             "hexdata should be interpreted. "
                             "Valid values are: "
                             "t2h (target to host) or h2t (host to target). "
                             "With t2h, RX trailers will be printed if "
                             "--print-data is used. h2t is default. "
                             "This option should be combined with an "
                             "applicable --desc-str option. ")
    base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,
                             help="Description string(s) of the dumps to be. "
                             "excluded. Similar to --desc-str, but all "
                             "matching prefixes will be excluded from "
                             "the analysis.")
    base_parser.add_argument('-s', '--short-htc-header', action="store_true",
                             help="Use 6 byte HTC header (\"old\" format) "
                             "instead of 8 bytes.")
    base_parser.add_argument('-t', '--keep-timestamps', action="store_true",
                             help="Keep the timestamps associated with each "
                             "hexdump in the output. "
                             "This option will only have effect if the "
                             "log file contains timestamps.")

    # Top-level parser; the subcommand name lands in parsed_args.subparser_name.
    parser = argparse.ArgumentParser(prog="qca_hex_analyzer",
                                     description=description,
                                     parents=[base_parser])
    subparsers = parser.add_subparsers(dest="subparser_name")

    # wmi-ctrl: WMI control message extraction.
    parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl',
                                            help=wmi_ctrl_help,
                                            description=wmi_ctrl_description,
                                            parents=[base_parser])
    parser_wmi_ctrl.add_argument('--wmi-old', action="store_true",
                                 help="Specifies whether or not the WMI messages "
                                 "are according to the \"old\" WMI protocol. "
                                 "If not set, the messages will be interpreted "
                                 "according to the unified WMI format")
    parser_wmi_ctrl.add_argument('-p', '--print-data', action="store_true",
                                 help="Print WMI data message payload (and not just "
                                 "WMI message ID) for all encountered messages. ")
    parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
                                 type=int, default=[2],
                                 help="WMI control service endpoint ID. "
                                 "This is the endpoint where the WMI control data is "
                                 "expected to be present. Make sure the endpoint "
                                 "matches the endpoint id associated with the "
                                 "control service endpoint (service id 0x100) "
                                 "of the driver (the endpoint received from the "
                                 "target in the HTC service connect response). "
                                 "If this option is omitted a default value of 2 "
                                 "will be used.")
    parser_wmi_ctrl.add_argument('--tlv', action="store_true",
                                 help="TLV analysis."
                                 "Each WMI message will be interpreted as a TLV "
                                 "message and the content of the message will be. "
                                 "written out in text (instead of hexdump). "
                                 "If the encountered message is not supported by "
                                 "the parser, the hex data will be printed instead.")
    # --id values go through auto_int, so hex/octal/binary prefixes work.
    parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID',
                                 nargs='+', type=auto_int,
                                 help="WMI message id filter. "
                                 "Only WMI messages with an id matching any of the "
                                 "provided id's will be included in the output. "
                                 "If no --id | --msg-id option is given, no "
                                 "filtering will be performed. ")
    parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',
                                 nargs='+', type=auto_int,
                                 help="WMI message id exclude filter. "
                                 "Similar to --id | --msg-id, but all matching "
                                 "id's will be excluded from the output. ")

    # htc-ctrl: HTC control message extraction (always endpoint 0).
    parser_htc_ctrl = subparsers.add_parser('htc-ctrl',
                                            help=htc_ctrl_help,
                                            description=htc_ctrl_description,
                                            parents=[base_parser])
    parser_htc_ctrl.add_argument('-p', '--print-data', action="store_true",
                                 help="Print HTC ctrl data message payload (and not just "
                                 "message ID) for all encountered messages. ")

    # htt: HTT message extraction.
    parser_htt = subparsers.add_parser('htt',
                                       help=htt_help,
                                       description=htt_description,
                                       parents=[base_parser])
    parser_htt.add_argument('-p', '--print-data', action="store_true",
                            help="Print HTT data message payload (and not just "
                            "HTT message ID) for all encountered messages. ")
    parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
                            type=int, default=[1],
                            help="HTT service endpoint ID. "
                            "This is the endpoint where the HTT data is "
                            "expected to be present. Make sure the endpoint "
                            "matches the endpoint id associated with the "
                            "HTT endpoint (service id 0x300) "
                            "of the driver (the endpoint received from the "
                            "target in the HTC service connect response). "
                            "If this option is omitted a default value of 1 "
                            "will be used.")

    # all: combined WMI control + HTC control + HTT extraction.
    parser_all = subparsers.add_parser('all',
                                       help=all_help,
                                       description=all_description,
                                       parents=[base_parser])
    parser_all.add_argument('-p', '--print-data', action="store_true",
                            help="Print message payload (and not just "
                            "message ID) for all encountered messages. ")
    parser_all.add_argument('--wmi-old', action="store_true",
                            help="Specifies whether or not the WMI messages "
                            "are according to the \"old\" WMI protocol. "
                            "If not set, the messages will be interpreted "
                            "according to the unified WMI format")
    parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1,
                            type=int, default=[1],
                            help="HTT service endpoint ID. "
                            "This is the endpoint where the HTT data is "
                            "expected to be present. Make sure the endpoint "
                            "matches the endpoint id associated with the "
                            "HTT endpoint (service id 0x300) "
                            "of the driver (the endpoint received from the "
                            "target in the HTC service connect response). "
                            "If this option is omitted a default value of 1 "
                            "will be used.")
    parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1,
                            type=int, default=[2],
                            help="WMI control service endpoint ID. "
                            "This is the endpoint where the WMI control data is "
                            "expected to be present. Make sure the endpoint "
                            "matches the endpoint id associated with the "
                            "control service endpoint (service id 0x100) "
                            "of the driver (the endpoint received from the "
                            "target in the HTC service connect response). "
                            "If this option is omitted a default value of 2 "
                            "will be used.")

    parsed_args = parser.parse_args()
def main():
    """Command-line entry point.

    Parses arguments (into the module-global ``parsed_args``), builds a
    hex filter and the analyzer matching the chosen subcommand, then
    streams the input line by line, writing decoded message IDs (and
    optionally payloads) to the output.
    """
    global parsed_args

    load_options()

    try:
        # Select input/output streams; default to stdin/stdout.
        if parsed_args.input_file:
            infp = open(parsed_args.input_file, "r")
        else:
            infp = sys.stdin
        if parsed_args.output_file:
            outfp = open(parsed_args.output_file, "w")
        else:
            outfp = sys.stdout

        if parsed_args.data_direction:
            if parsed_args.data_direction[0] == 't2h':
                t2h = True
            elif parsed_args.data_direction[0] == 'h2t':
                t2h = False
            else:
                sys.stderr.write('Unsupported data direction: {}\n'.format(parsed_args.data_direction[0]))
                exit(1)
        else:
            # Interpret the data as host -> target is the default behaviour
            t2h = False

        hf = hexfilter.HexFilterLinux(skip_timestamps=(not parsed_args.keep_timestamps),
                                      abs_timestamps=True,
                                      dump_desc=parsed_args.desc_str,
                                      dump_desc_invert=parsed_args.desc_str_invert,
                                      log_has_timestamps=(not parsed_args.no_timestamps),
                                      include_dump_desc_in_output=False,
                                      remove_ascii_part=True)

        if parsed_args.subparser_name == 'wmi-ctrl':
            analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],
                                       wmi_unified=(not parsed_args.wmi_old),
                                       short_htc_hdr=parsed_args.short_htc_header,
                                       timestamps=parsed_args.keep_timestamps,
                                       t2h=t2h,
                                       tlv_analysis=parsed_args.tlv,
                                       msg_id_filter=parsed_args.id,
                                       msg_id_exclude_filter=parsed_args.skip_id)
            if parsed_args.tlv:
                # TLV analysis implies payload printing.
                parsed_args.print_data = True
        elif parsed_args.subparser_name == 'htc-ctrl':
            analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.short_htc_header,
                                       timestamps=parsed_args.keep_timestamps,
                                       t2h=t2h)
        elif parsed_args.subparser_name == 'htt':
            analyzer = HttAnalyzer(eid=parsed_args.ep_id[0],
                                   short_htc_hdr=parsed_args.short_htc_header,
                                   timestamps=parsed_args.keep_timestamps,
                                   t2h=t2h)
        elif parsed_args.subparser_name == 'all':
            analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[0],
                                   htt_eid=parsed_args.htt_ep_id[0],
                                   wmi_unified=(not parsed_args.wmi_old),
                                   short_htc_hdr=parsed_args.short_htc_header,
                                   timestamps=parsed_args.keep_timestamps,
                                   t2h=t2h)
        else:
            sys.stderr.write('Unsupported subcommand: {}\n'.format(parsed_args.subparser_name))
            # BUG FIX: previously fell through with `analyzer` undefined,
            # raising a NameError in the loop below; bail out instead.
            exit(1)

        # Feed every filtered hexdump line to the analyzer and emit results.
        for line in infp:
            if hf.parse_line(line):
                hexdata = hf.get_hex()
                if analyzer.parse_hexdata(hexdata):
                    # Renamed from `str` to avoid shadowing the builtin.
                    id_str = analyzer.get_id_str()
                    outfp.write(id_str)
                    if parsed_args.print_data:
                        analyzer.print_data(outfp)
    except IOError as err:
        sys.stderr.write('{}\n'.format(err))
    except Exception:
        # Deliberate developer aid: dump the traceback and drop into a
        # post-mortem debugger. Narrowed from a bare `except:` so that
        # SystemExit (raised by exit(1) above) and KeyboardInterrupt
        # propagate instead of triggering the debugger.
        _, _, tb = sys.exc_info()
        traceback.print_exc()
        pdb.post_mortem(tb)


if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "3b381668dbb9b4e5a2e323dc4d6b5e3951736882",
"index": 1804,
"step-1": "<mask token>\n\n\ndef auto_int(x):\n return int(x, 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef auto_int(x):\n return int(x, 0)\n\n\ndef load_options():\n global parsed_args\n base_parser = argparse.ArgumentParser(add_help=False)\n base_parser.add_argument('-i', '--input-file', help=\n 'Input (log) file. If omitted, stdin will be read.')\n base_parser.add_argument('-o', '--output-file', help=\n 'Output file. If omitted, the output will be written to stdout.')\n base_parser.add_argument('-n', '--no-timestamps', action='store_true',\n help='Specifies whether or not the input file contains timestamps. ')\n base_parser.add_argument('-d', '--desc-str', nargs='+', type=str, help=\n 'Description string(s) of the dumps. Only dumps with a prefix matching any of the provided desc strings will be analyzed. If no --desc-str option is given, no description filtering will be performed. The prefix of a hexdump is the short description string before the address in each line of the dump, i.e the hexdump prefix. --desc-str is normally used to select between RX and TX logs and should be combined with a proper --data-direction option.'\n )\n base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,\n help=\n 'This option is used to specify how the hexdata should be interpreted. Valid values are: t2h (target to host) or h2t (host to target). With t2h, RX trailers will be printed if --print-data is used. h2t is default. This option should be combined with an applicable --desc-str option. '\n )\n base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,\n help=\n 'Description string(s) of the dumps to be. excluded. Similar to --desc-str, but all matching prefixes will be excluded from the analysis.'\n )\n base_parser.add_argument('-s', '--short-htc-header', action=\n 'store_true', help=\n 'Use 6 byte HTC header (\"old\" format) instead of 8 bytes.')\n base_parser.add_argument('-t', '--keep-timestamps', action='store_true',\n help=\n 'Keep the timestamps associated with each hexdump in the output. 
This option will only have effect if the log file contains timestamps.'\n )\n parser = argparse.ArgumentParser(prog='qca_hex_analyzer', description=\n description, parents=[base_parser])\n subparsers = parser.add_subparsers(dest='subparser_name')\n parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl', help=wmi_ctrl_help,\n description=wmi_ctrl_description, parents=[base_parser])\n parser_wmi_ctrl.add_argument('--wmi-old', action='store_true', help=\n 'Specifies whether or not the WMI messages are according to the \"old\" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'\n )\n parser_wmi_ctrl.add_argument('-p', '--print-data', action='store_true',\n help=\n 'Print WMI data message payload (and not just WMI message ID) for all encountered messages. '\n )\n parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,\n type=int, default=[2], help=\n 'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'\n )\n parser_wmi_ctrl.add_argument('--tlv', action='store_true', help=\n 'TLV analysis.Each WMI message will be interpreted as a TLV message and the content of the message will be. written out in text (instead of hexdump). If the encountered message is not supported by the parser, the hex data will be printed instead.'\n )\n parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID', nargs=\n '+', type=auto_int, help=\n \"WMI message id filter. Only WMI messages with an id matching any of the provided id's will be included in the output. If no --id | --msg-id option is given, no filtering will be performed. 
\"\n )\n parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',\n nargs='+', type=auto_int, help=\n \"WMI message id exclude filter. Similar to --id | --msg-id, but all matching id's will be excluded from the output. \"\n )\n parser_htc_ctrl = subparsers.add_parser('htc-ctrl', help=htc_ctrl_help,\n description=htc_ctrl_description, parents=[base_parser])\n parser_htc_ctrl.add_argument('-p', '--print-data', action='store_true',\n help=\n 'Print HTC ctrl data message payload (and not just message ID) for all encountered messages. '\n )\n parser_htt = subparsers.add_parser('htt', help=htt_help, description=\n htt_description, parents=[base_parser])\n parser_htt.add_argument('-p', '--print-data', action='store_true', help\n =\n 'Print HTT data message payload (and not just HTT message ID) for all encountered messages. '\n )\n parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1, type=\n int, default=[1], help=\n 'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'\n )\n parser_all = subparsers.add_parser('all', help=all_help, description=\n all_description, parents=[base_parser])\n parser_all.add_argument('-p', '--print-data', action='store_true', help\n =\n 'Print message payload (and not just message ID) for all encountered messages. '\n )\n parser_all.add_argument('--wmi-old', action='store_true', help=\n 'Specifies whether or not the WMI messages are according to the \"old\" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'\n )\n parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1, type=int,\n default=[1], help=\n 'HTT service endpoint ID. 
This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'\n )\n parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1, type\n =int, default=[2], help=\n 'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'\n )\n parsed_args = parser.parse_args()\n\n\ndef main():\n global parsed_args\n load_options()\n try:\n if parsed_args.input_file:\n infp = open(parsed_args.input_file, 'r')\n else:\n infp = sys.stdin\n if parsed_args.output_file:\n outfp = open(parsed_args.output_file, 'w')\n else:\n outfp = sys.stdout\n if parsed_args.data_direction:\n if parsed_args.data_direction[0] == 't2h':\n t2h = True\n elif parsed_args.data_direction[0] == 'h2t':\n t2h = False\n else:\n sys.stderr.write('Unsupported data direction: {}\\n'.format(\n parsed_args.data_direction[0]))\n exit(1)\n else:\n t2h = False\n hf = hexfilter.HexFilterLinux(skip_timestamps=not parsed_args.\n keep_timestamps, abs_timestamps=True, dump_desc=parsed_args.\n desc_str, dump_desc_invert=parsed_args.desc_str_invert,\n log_has_timestamps=not parsed_args.no_timestamps,\n include_dump_desc_in_output=False, remove_ascii_part=True)\n if parsed_args.subparser_name == 'wmi-ctrl':\n analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],\n wmi_unified=not parsed_args.wmi_old, short_htc_hdr=\n parsed_args.short_htc_header, timestamps=parsed_args.\n keep_timestamps, t2h=t2h, tlv_analysis=parsed_args.tlv,\n msg_id_filter=parsed_args.id, 
msg_id_exclude_filter=\n parsed_args.skip_id)\n if parsed_args.tlv:\n parsed_args.print_data = True\n elif parsed_args.subparser_name == 'htc-ctrl':\n analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.\n short_htc_header, timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n elif parsed_args.subparser_name == 'htt':\n analyzer = HttAnalyzer(eid=parsed_args.ep_id[0], short_htc_hdr=\n parsed_args.short_htc_header, timestamps=parsed_args.\n keep_timestamps, t2h=t2h)\n elif parsed_args.subparser_name == 'all':\n analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[\n 0], htt_eid=parsed_args.htt_ep_id[0], wmi_unified=not\n parsed_args.wmi_old, short_htc_hdr=parsed_args.\n short_htc_header, timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n else:\n sys.stderr.write('Unsupported subcommand: {}\\n'.format(\n parsed_args.subparser_name))\n for line in infp:\n if hf.parse_line(line):\n hexdata = hf.get_hex()\n if analyzer.parse_hexdata(hexdata):\n str = analyzer.get_id_str()\n outfp.write(str)\n if parsed_args.print_data:\n analyzer.print_data(outfp)\n except IOError as err:\n sys.stderr.write('{}\\n'.format(err))\n except:\n type, value, tb = sys.exc_info()\n traceback.print_exc()\n pdb.post_mortem(tb)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\ndescription = (\n 'Tool used to analyze hexdumps produced by a qca wireless kernel driver (such as ath6kl, ath10k or qcacld2.0). The hexdumps are assumed to contain dumps of the traffic between the driver and the target. No special preprocessing of the log files is required. Filter strings (description strings) can be used to limit the output (only RX or TX etc.). The driver must of course be configured to log all necessary debug data (for ath6kl and ath10k this means a proper debug mask). '\n )\nwmi_ctrl_help = (\n 'Subcommand for WMI control message parsing. This subcommand is used to extract WMI control messages from the input. '\n )\nwmi_ctrl_description = (\n \"Extracts WMI control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid WMI control message ID's will be printed together with the message enum string (from ath6kl source code). The --wmi-old option must be used if the driver does not use the WMI unified protocol (ath6kl). The WMI control message payload will also be printed together with message ID's if the --print-data option is used.\"\n )\nhtc_ctrl_help = (\n 'Subcommand for HTC control message parsing. This subcommand is used to extract HTC control messages from the input. '\n )\nhtc_ctrl_description = (\n \"Extracts HTC control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). All valid HTC control message ID's will be printed together with the message enum string (from ath6kl source code). The message payload will also be printed together with the message ID's if the --print-data option is used. HTC control messages will always be extracted from endpoint 0.\"\n )\nhtt_help = (\n 'Subcommand for HTT message parsing. 
This subcommand is used to extract HTT messages from the input. '\n )\nhtt_description = (\n \"Extracts HTT message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid HTT message ID's will be printed together with the message enum string (from ath10k source code). The message payload will also be printed together with message ID's if the --print-data option is used.\"\n )\nall_help = (\n 'Subcommand for parsing of all supported message types. This subcommand is used to extract both WMI control, HTC control and HTT messages from the input. '\n )\nall_description = (\n \"Extracts message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output-file). The messages can be any of the supported message types (currently only WMI controli, HTC control and HTT). --wmi-ctrl-ep-id and --htt-ep-id is used to determine from which endpoints WMI and HTT data will be extracted (see description of those options below). HTC control messages will always be extracted from ep 0. All valid message ID's will be printed together with a corresponding message enum string. The message payload will also be printed together with message ID's if the --print-data option is used.\"\n )\n\n\ndef auto_int(x):\n return int(x, 0)\n\n\ndef load_options():\n global parsed_args\n base_parser = argparse.ArgumentParser(add_help=False)\n base_parser.add_argument('-i', '--input-file', help=\n 'Input (log) file. If omitted, stdin will be read.')\n base_parser.add_argument('-o', '--output-file', help=\n 'Output file. If omitted, the output will be written to stdout.')\n base_parser.add_argument('-n', '--no-timestamps', action='store_true',\n help='Specifies whether or not the input file contains timestamps. 
')\n base_parser.add_argument('-d', '--desc-str', nargs='+', type=str, help=\n 'Description string(s) of the dumps. Only dumps with a prefix matching any of the provided desc strings will be analyzed. If no --desc-str option is given, no description filtering will be performed. The prefix of a hexdump is the short description string before the address in each line of the dump, i.e the hexdump prefix. --desc-str is normally used to select between RX and TX logs and should be combined with a proper --data-direction option.'\n )\n base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,\n help=\n 'This option is used to specify how the hexdata should be interpreted. Valid values are: t2h (target to host) or h2t (host to target). With t2h, RX trailers will be printed if --print-data is used. h2t is default. This option should be combined with an applicable --desc-str option. '\n )\n base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,\n help=\n 'Description string(s) of the dumps to be. excluded. Similar to --desc-str, but all matching prefixes will be excluded from the analysis.'\n )\n base_parser.add_argument('-s', '--short-htc-header', action=\n 'store_true', help=\n 'Use 6 byte HTC header (\"old\" format) instead of 8 bytes.')\n base_parser.add_argument('-t', '--keep-timestamps', action='store_true',\n help=\n 'Keep the timestamps associated with each hexdump in the output. This option will only have effect if the log file contains timestamps.'\n )\n parser = argparse.ArgumentParser(prog='qca_hex_analyzer', description=\n description, parents=[base_parser])\n subparsers = parser.add_subparsers(dest='subparser_name')\n parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl', help=wmi_ctrl_help,\n description=wmi_ctrl_description, parents=[base_parser])\n parser_wmi_ctrl.add_argument('--wmi-old', action='store_true', help=\n 'Specifies whether or not the WMI messages are according to the \"old\" WMI protocol. 
If not set, the messages will be interpreted according to the unified WMI format'\n )\n parser_wmi_ctrl.add_argument('-p', '--print-data', action='store_true',\n help=\n 'Print WMI data message payload (and not just WMI message ID) for all encountered messages. '\n )\n parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,\n type=int, default=[2], help=\n 'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'\n )\n parser_wmi_ctrl.add_argument('--tlv', action='store_true', help=\n 'TLV analysis.Each WMI message will be interpreted as a TLV message and the content of the message will be. written out in text (instead of hexdump). If the encountered message is not supported by the parser, the hex data will be printed instead.'\n )\n parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID', nargs=\n '+', type=auto_int, help=\n \"WMI message id filter. Only WMI messages with an id matching any of the provided id's will be included in the output. If no --id | --msg-id option is given, no filtering will be performed. \"\n )\n parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',\n nargs='+', type=auto_int, help=\n \"WMI message id exclude filter. Similar to --id | --msg-id, but all matching id's will be excluded from the output. \"\n )\n parser_htc_ctrl = subparsers.add_parser('htc-ctrl', help=htc_ctrl_help,\n description=htc_ctrl_description, parents=[base_parser])\n parser_htc_ctrl.add_argument('-p', '--print-data', action='store_true',\n help=\n 'Print HTC ctrl data message payload (and not just message ID) for all encountered messages. 
'\n )\n parser_htt = subparsers.add_parser('htt', help=htt_help, description=\n htt_description, parents=[base_parser])\n parser_htt.add_argument('-p', '--print-data', action='store_true', help\n =\n 'Print HTT data message payload (and not just HTT message ID) for all encountered messages. '\n )\n parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1, type=\n int, default=[1], help=\n 'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'\n )\n parser_all = subparsers.add_parser('all', help=all_help, description=\n all_description, parents=[base_parser])\n parser_all.add_argument('-p', '--print-data', action='store_true', help\n =\n 'Print message payload (and not just message ID) for all encountered messages. '\n )\n parser_all.add_argument('--wmi-old', action='store_true', help=\n 'Specifies whether or not the WMI messages are according to the \"old\" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'\n )\n parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1, type=int,\n default=[1], help=\n 'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'\n )\n parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1, type\n =int, default=[2], help=\n 'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. 
Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'\n )\n parsed_args = parser.parse_args()\n\n\ndef main():\n global parsed_args\n load_options()\n try:\n if parsed_args.input_file:\n infp = open(parsed_args.input_file, 'r')\n else:\n infp = sys.stdin\n if parsed_args.output_file:\n outfp = open(parsed_args.output_file, 'w')\n else:\n outfp = sys.stdout\n if parsed_args.data_direction:\n if parsed_args.data_direction[0] == 't2h':\n t2h = True\n elif parsed_args.data_direction[0] == 'h2t':\n t2h = False\n else:\n sys.stderr.write('Unsupported data direction: {}\\n'.format(\n parsed_args.data_direction[0]))\n exit(1)\n else:\n t2h = False\n hf = hexfilter.HexFilterLinux(skip_timestamps=not parsed_args.\n keep_timestamps, abs_timestamps=True, dump_desc=parsed_args.\n desc_str, dump_desc_invert=parsed_args.desc_str_invert,\n log_has_timestamps=not parsed_args.no_timestamps,\n include_dump_desc_in_output=False, remove_ascii_part=True)\n if parsed_args.subparser_name == 'wmi-ctrl':\n analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],\n wmi_unified=not parsed_args.wmi_old, short_htc_hdr=\n parsed_args.short_htc_header, timestamps=parsed_args.\n keep_timestamps, t2h=t2h, tlv_analysis=parsed_args.tlv,\n msg_id_filter=parsed_args.id, msg_id_exclude_filter=\n parsed_args.skip_id)\n if parsed_args.tlv:\n parsed_args.print_data = True\n elif parsed_args.subparser_name == 'htc-ctrl':\n analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.\n short_htc_header, timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n elif parsed_args.subparser_name == 'htt':\n analyzer = HttAnalyzer(eid=parsed_args.ep_id[0], short_htc_hdr=\n parsed_args.short_htc_header, timestamps=parsed_args.\n keep_timestamps, t2h=t2h)\n elif parsed_args.subparser_name == 'all':\n analyzer = 
AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[\n 0], htt_eid=parsed_args.htt_ep_id[0], wmi_unified=not\n parsed_args.wmi_old, short_htc_hdr=parsed_args.\n short_htc_header, timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n else:\n sys.stderr.write('Unsupported subcommand: {}\\n'.format(\n parsed_args.subparser_name))\n for line in infp:\n if hf.parse_line(line):\n hexdata = hf.get_hex()\n if analyzer.parse_hexdata(hexdata):\n str = analyzer.get_id_str()\n outfp.write(str)\n if parsed_args.print_data:\n analyzer.print_data(outfp)\n except IOError as err:\n sys.stderr.write('{}\\n'.format(err))\n except:\n type, value, tb = sys.exc_info()\n traceback.print_exc()\n pdb.post_mortem(tb)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from collections import namedtuple\nimport argparse\nimport pdb\nimport traceback\nimport sys\nimport os\nfrom qca_hex_analyzer import WmiCtrlAnalyzer, HtcCtrlAnalyzer, HttAnalyzer, AllAnalyzer\nimport hexfilter\ndescription = (\n 'Tool used to analyze hexdumps produced by a qca wireless kernel driver (such as ath6kl, ath10k or qcacld2.0). The hexdumps are assumed to contain dumps of the traffic between the driver and the target. No special preprocessing of the log files is required. Filter strings (description strings) can be used to limit the output (only RX or TX etc.). The driver must of course be configured to log all necessary debug data (for ath6kl and ath10k this means a proper debug mask). '\n )\nwmi_ctrl_help = (\n 'Subcommand for WMI control message parsing. This subcommand is used to extract WMI control messages from the input. '\n )\nwmi_ctrl_description = (\n \"Extracts WMI control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid WMI control message ID's will be printed together with the message enum string (from ath6kl source code). The --wmi-old option must be used if the driver does not use the WMI unified protocol (ath6kl). The WMI control message payload will also be printed together with message ID's if the --print-data option is used.\"\n )\nhtc_ctrl_help = (\n 'Subcommand for HTC control message parsing. This subcommand is used to extract HTC control messages from the input. '\n )\nhtc_ctrl_description = (\n \"Extracts HTC control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). All valid HTC control message ID's will be printed together with the message enum string (from ath6kl source code). 
The message payload will also be printed together with the message ID's if the --print-data option is used. HTC control messages will always be extracted from endpoint 0.\"\n )\nhtt_help = (\n 'Subcommand for HTT message parsing. This subcommand is used to extract HTT messages from the input. '\n )\nhtt_description = (\n \"Extracts HTT message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid HTT message ID's will be printed together with the message enum string (from ath10k source code). The message payload will also be printed together with message ID's if the --print-data option is used.\"\n )\nall_help = (\n 'Subcommand for parsing of all supported message types. This subcommand is used to extract both WMI control, HTC control and HTT messages from the input. '\n )\nall_description = (\n \"Extracts message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output-file). The messages can be any of the supported message types (currently only WMI controli, HTC control and HTT). --wmi-ctrl-ep-id and --htt-ep-id is used to determine from which endpoints WMI and HTT data will be extracted (see description of those options below). HTC control messages will always be extracted from ep 0. All valid message ID's will be printed together with a corresponding message enum string. The message payload will also be printed together with message ID's if the --print-data option is used.\"\n )\n\n\ndef auto_int(x):\n return int(x, 0)\n\n\ndef load_options():\n global parsed_args\n base_parser = argparse.ArgumentParser(add_help=False)\n base_parser.add_argument('-i', '--input-file', help=\n 'Input (log) file. If omitted, stdin will be read.')\n base_parser.add_argument('-o', '--output-file', help=\n 'Output file. 
If omitted, the output will be written to stdout.')\n base_parser.add_argument('-n', '--no-timestamps', action='store_true',\n help='Specifies whether or not the input file contains timestamps. ')\n base_parser.add_argument('-d', '--desc-str', nargs='+', type=str, help=\n 'Description string(s) of the dumps. Only dumps with a prefix matching any of the provided desc strings will be analyzed. If no --desc-str option is given, no description filtering will be performed. The prefix of a hexdump is the short description string before the address in each line of the dump, i.e the hexdump prefix. --desc-str is normally used to select between RX and TX logs and should be combined with a proper --data-direction option.'\n )\n base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,\n help=\n 'This option is used to specify how the hexdata should be interpreted. Valid values are: t2h (target to host) or h2t (host to target). With t2h, RX trailers will be printed if --print-data is used. h2t is default. This option should be combined with an applicable --desc-str option. '\n )\n base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,\n help=\n 'Description string(s) of the dumps to be. excluded. Similar to --desc-str, but all matching prefixes will be excluded from the analysis.'\n )\n base_parser.add_argument('-s', '--short-htc-header', action=\n 'store_true', help=\n 'Use 6 byte HTC header (\"old\" format) instead of 8 bytes.')\n base_parser.add_argument('-t', '--keep-timestamps', action='store_true',\n help=\n 'Keep the timestamps associated with each hexdump in the output. 
This option will only have effect if the log file contains timestamps.'\n )\n parser = argparse.ArgumentParser(prog='qca_hex_analyzer', description=\n description, parents=[base_parser])\n subparsers = parser.add_subparsers(dest='subparser_name')\n parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl', help=wmi_ctrl_help,\n description=wmi_ctrl_description, parents=[base_parser])\n parser_wmi_ctrl.add_argument('--wmi-old', action='store_true', help=\n 'Specifies whether or not the WMI messages are according to the \"old\" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'\n )\n parser_wmi_ctrl.add_argument('-p', '--print-data', action='store_true',\n help=\n 'Print WMI data message payload (and not just WMI message ID) for all encountered messages. '\n )\n parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,\n type=int, default=[2], help=\n 'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'\n )\n parser_wmi_ctrl.add_argument('--tlv', action='store_true', help=\n 'TLV analysis.Each WMI message will be interpreted as a TLV message and the content of the message will be. written out in text (instead of hexdump). If the encountered message is not supported by the parser, the hex data will be printed instead.'\n )\n parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID', nargs=\n '+', type=auto_int, help=\n \"WMI message id filter. Only WMI messages with an id matching any of the provided id's will be included in the output. If no --id | --msg-id option is given, no filtering will be performed. 
\"\n )\n parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',\n nargs='+', type=auto_int, help=\n \"WMI message id exclude filter. Similar to --id | --msg-id, but all matching id's will be excluded from the output. \"\n )\n parser_htc_ctrl = subparsers.add_parser('htc-ctrl', help=htc_ctrl_help,\n description=htc_ctrl_description, parents=[base_parser])\n parser_htc_ctrl.add_argument('-p', '--print-data', action='store_true',\n help=\n 'Print HTC ctrl data message payload (and not just message ID) for all encountered messages. '\n )\n parser_htt = subparsers.add_parser('htt', help=htt_help, description=\n htt_description, parents=[base_parser])\n parser_htt.add_argument('-p', '--print-data', action='store_true', help\n =\n 'Print HTT data message payload (and not just HTT message ID) for all encountered messages. '\n )\n parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1, type=\n int, default=[1], help=\n 'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'\n )\n parser_all = subparsers.add_parser('all', help=all_help, description=\n all_description, parents=[base_parser])\n parser_all.add_argument('-p', '--print-data', action='store_true', help\n =\n 'Print message payload (and not just message ID) for all encountered messages. '\n )\n parser_all.add_argument('--wmi-old', action='store_true', help=\n 'Specifies whether or not the WMI messages are according to the \"old\" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'\n )\n parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1, type=int,\n default=[1], help=\n 'HTT service endpoint ID. 
This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'\n )\n parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1, type\n =int, default=[2], help=\n 'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'\n )\n parsed_args = parser.parse_args()\n\n\ndef main():\n global parsed_args\n load_options()\n try:\n if parsed_args.input_file:\n infp = open(parsed_args.input_file, 'r')\n else:\n infp = sys.stdin\n if parsed_args.output_file:\n outfp = open(parsed_args.output_file, 'w')\n else:\n outfp = sys.stdout\n if parsed_args.data_direction:\n if parsed_args.data_direction[0] == 't2h':\n t2h = True\n elif parsed_args.data_direction[0] == 'h2t':\n t2h = False\n else:\n sys.stderr.write('Unsupported data direction: {}\\n'.format(\n parsed_args.data_direction[0]))\n exit(1)\n else:\n t2h = False\n hf = hexfilter.HexFilterLinux(skip_timestamps=not parsed_args.\n keep_timestamps, abs_timestamps=True, dump_desc=parsed_args.\n desc_str, dump_desc_invert=parsed_args.desc_str_invert,\n log_has_timestamps=not parsed_args.no_timestamps,\n include_dump_desc_in_output=False, remove_ascii_part=True)\n if parsed_args.subparser_name == 'wmi-ctrl':\n analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],\n wmi_unified=not parsed_args.wmi_old, short_htc_hdr=\n parsed_args.short_htc_header, timestamps=parsed_args.\n keep_timestamps, t2h=t2h, tlv_analysis=parsed_args.tlv,\n msg_id_filter=parsed_args.id, 
msg_id_exclude_filter=\n parsed_args.skip_id)\n if parsed_args.tlv:\n parsed_args.print_data = True\n elif parsed_args.subparser_name == 'htc-ctrl':\n analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.\n short_htc_header, timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n elif parsed_args.subparser_name == 'htt':\n analyzer = HttAnalyzer(eid=parsed_args.ep_id[0], short_htc_hdr=\n parsed_args.short_htc_header, timestamps=parsed_args.\n keep_timestamps, t2h=t2h)\n elif parsed_args.subparser_name == 'all':\n analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[\n 0], htt_eid=parsed_args.htt_ep_id[0], wmi_unified=not\n parsed_args.wmi_old, short_htc_hdr=parsed_args.\n short_htc_header, timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n else:\n sys.stderr.write('Unsupported subcommand: {}\\n'.format(\n parsed_args.subparser_name))\n for line in infp:\n if hf.parse_line(line):\n hexdata = hf.get_hex()\n if analyzer.parse_hexdata(hexdata):\n str = analyzer.get_id_str()\n outfp.write(str)\n if parsed_args.print_data:\n analyzer.print_data(outfp)\n except IOError as err:\n sys.stderr.write('{}\\n'.format(err))\n except:\n type, value, tb = sys.exc_info()\n traceback.print_exc()\n pdb.post_mortem(tb)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from collections import namedtuple\n\nimport argparse\nimport pdb\nimport traceback\nimport sys\nimport os\nfrom qca_hex_analyzer import WmiCtrlAnalyzer, HtcCtrlAnalyzer, HttAnalyzer, AllAnalyzer\nimport hexfilter\n\ndescription = \\\n \"Tool used to analyze hexdumps produced by a qca wireless kernel \" \\\n \"driver (such as ath6kl, ath10k or qcacld2.0). \" \\\n \"The hexdumps are assumed to contain dumps of the traffic \" \\\n \"between the driver and the target. \" \\\n \"No special preprocessing of the log files is required. \" \\\n \"Filter strings (description strings) can be used to limit the output \" \\\n \"(only RX or TX etc.). \" \\\n \"The driver must of course be configured to log all necessary debug \" \\\n \"data (for ath6kl and ath10k this means a proper debug mask). \"\n\nwmi_ctrl_help = \\\n \"Subcommand for WMI control message parsing. \" \\\n \"This subcommand is used to extract WMI control messages from the input. \"\n\nwmi_ctrl_description = \\\n \"Extracts WMI control message hexdata from an input (--input-file). \" \\\n \"The extracted messages will be printed to the output (--output -file). \" \\\n \"--ep-id is used to determine from which HTC endpoint the data will \" \\\n \"be extracted (see description of that option below). \" \\\n \"All valid WMI control message ID's will be printed together with the \" \\\n \"message enum string (from ath6kl source code). \" \\\n \"The --wmi-old option must be used if the driver does not use the WMI \" \\\n \"unified protocol (ath6kl). \" \\\n \"The WMI control message payload will also be printed together with \" \\\n \"message ID's if the --print-data option is used.\"\n\nhtc_ctrl_help = \\\n \"Subcommand for HTC control message parsing. \" \\\n \"This subcommand is used to extract HTC control messages from the input. \"\n\nhtc_ctrl_description = \\\n \"Extracts HTC control message hexdata from an input (--input-file). 
\" \\\n \"The extracted messages will be printed to the output (--output -file). \" \\\n \"All valid HTC control message ID's will be printed together with the \" \\\n \"message enum string (from ath6kl source code). \" \\\n \"The message payload will also be printed together with the \" \\\n \"message ID's if the --print-data option is used. \" \\\n \"HTC control messages will always be extracted from endpoint 0.\"\n\nhtt_help = \\\n \"Subcommand for HTT message parsing. \" \\\n \"This subcommand is used to extract HTT messages from the input. \"\n\nhtt_description = \\\n \"Extracts HTT message hexdata from an input (--input-file). \" \\\n \"The extracted messages will be printed to the output (--output -file). \" \\\n \"--ep-id is used to determine from which HTC endpoint the data will \" \\\n \"be extracted (see description of that option below). \" \\\n \"All valid HTT message ID's will be printed together with the \" \\\n \"message enum string (from ath10k source code). \" \\\n \"The message payload will also be printed together with \" \\\n \"message ID's if the --print-data option is used.\"\n\nall_help = \\\n \"Subcommand for parsing of all supported message types. \" \\\n \"This subcommand is used to extract both WMI control, \" \\\n \"HTC control and HTT messages from the input. \"\n\nall_description = \\\n \"Extracts message hexdata from an input (--input-file). \" \\\n \"The extracted messages will be printed to the output (--output-file). \" \\\n \"The messages can be any of the supported message types \" \\\n \"(currently only WMI controli, HTC control and HTT). \" \\\n \"--wmi-ctrl-ep-id and --htt-ep-id is used to determine from which \" \\\n \"endpoints WMI and HTT data will be extracted \" \\\n \"(see description of those options below). \" \\\n \"HTC control messages will always be extracted from ep 0. \" \\\n \"All valid message ID's will be printed together \" \\\n \"with a corresponding message enum string. 
\" \\\n \"The message payload will also be printed together with \" \\\n \"message ID's if the --print-data option is used.\"\n\n\ndef auto_int(x):\n\n return int(x, 0)\n\n\ndef load_options():\n\n global parsed_args\n base_parser = argparse.ArgumentParser(add_help=False)\n\n base_parser.add_argument('-i', '--input-file',\n help=\"Input (log) file. If omitted, \"\n \"stdin will be read.\")\n base_parser.add_argument('-o', '--output-file',\n help=\"Output file. If omitted, \"\n \"the output will be written to stdout.\")\n base_parser.add_argument('-n', '--no-timestamps', action=\"store_true\",\n help=\"Specifies whether or not the input file \"\n \"contains timestamps. \")\n base_parser.add_argument('-d', '--desc-str', nargs='+', type=str,\n help=\"Description string(s) of the dumps. \"\n \"Only dumps with a prefix \"\n \"matching any of the provided desc strings \"\n \"will be analyzed. \"\n \"If no --desc-str option is given, no \"\n \"description filtering will be performed. \"\n \"The prefix of a hexdump is the short \"\n \"description string before the address \"\n \"in each line of the dump, i.e the hexdump \"\n \"prefix. \"\n \"--desc-str is normally used to select \"\n \"between RX and TX logs and should be \"\n \"combined with a proper --data-direction \"\n \"option.\")\n base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,\n help=\"This option is used to specify how the \"\n \"hexdata should be interpreted. \"\n \"Valid values are: \"\n \"t2h (target to host) or h2t (host to target). \"\n \"With t2h, RX trailers will be printed if \"\n \"--print-data is used. h2t is default. \"\n \"This option should be combined with an \"\n \"applicable --desc-str option. \")\n base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,\n help=\"Description string(s) of the dumps to be. \"\n \"excluded. 
Similar to --desc-str, but all \"\n \"matching prefixes will be excluded from \"\n \"the analysis.\")\n base_parser.add_argument('-s', '--short-htc-header', action=\"store_true\",\n help=\"Use 6 byte HTC header (\\\"old\\\" format) \"\n \"instead of 8 bytes.\")\n base_parser.add_argument('-t', '--keep-timestamps', action=\"store_true\",\n help=\"Keep the timestamps associated with each \"\n \"hexdump in the output. \"\n \"This option will only have effect if the \"\n \"log file contains timestamps.\")\n\n parser = argparse.ArgumentParser(prog=\"qca_hex_analyzer\",\n description=description,\n parents=[base_parser])\n\n subparsers = parser.add_subparsers(dest=\"subparser_name\")\n parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl',\n help=wmi_ctrl_help,\n description=wmi_ctrl_description,\n parents=[base_parser])\n parser_wmi_ctrl.add_argument('--wmi-old', action=\"store_true\",\n help=\"Specifies whether or not the WMI messages \"\n \"are according to the \\\"old\\\" WMI protocol. \"\n \"If not set, the messages will be interpreted \"\n \"according to the unified WMI format\")\n parser_wmi_ctrl.add_argument('-p', '--print-data', action=\"store_true\",\n help=\"Print WMI data message payload (and not just \"\n \"WMI message ID) for all encountered messages. \")\n parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,\n type=int, default=[2],\n help=\"WMI control service endpoint ID. \"\n \"This is the endpoint where the WMI control data is \"\n \"expected to be present. Make sure the endpoint \"\n \"matches the endpoint id associated with the \"\n \"control service endpoint (service id 0x100) \"\n \"of the driver (the endpoint received from the \"\n \"target in the HTC service connect response). 
\"\n \"If this option is omitted a default value of 2 \"\n \"will be used.\")\n parser_wmi_ctrl.add_argument('--tlv', action=\"store_true\",\n help=\"TLV analysis.\"\n \"Each WMI message will be interpreted as a TLV \"\n \"message and the content of the message will be. \"\n \"written out in text (instead of hexdump). \"\n \"If the encountered message is not supported by \"\n \"the parser, the hex data will be printed instead.\")\n parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID',\n nargs='+', type=auto_int,\n help=\"WMI message id filter. \"\n \"Only WMI messages with an id matching any of the \"\n \"provided id's will be included in the output. \"\n \"If no --id | --msg-id option is given, no \"\n \"filtering will be performed. \")\n parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',\n nargs='+', type=auto_int,\n help=\"WMI message id exclude filter. \"\n \"Similar to --id | --msg-id, but all matching \"\n \"id's will be excluded from the output. \")\n parser_htc_ctrl = subparsers.add_parser('htc-ctrl',\n help=htc_ctrl_help,\n description=htc_ctrl_description,\n parents=[base_parser])\n parser_htc_ctrl.add_argument('-p', '--print-data', action=\"store_true\",\n help=\"Print HTC ctrl data message payload (and not just \"\n \"message ID) for all encountered messages. \")\n parser_htt = subparsers.add_parser('htt',\n help=htt_help,\n description=htt_description,\n parents=[base_parser])\n parser_htt.add_argument('-p', '--print-data', action=\"store_true\",\n help=\"Print HTT data message payload (and not just \"\n \"HTT message ID) for all encountered messages. \")\n parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1,\n type=int, default=[1],\n help=\"HTT service endpoint ID. \"\n \"This is the endpoint where the HTT data is \"\n \"expected to be present. 
Make sure the endpoint \"\n \"matches the endpoint id associated with the \"\n \"HTT endpoint (service id 0x300) \"\n \"of the driver (the endpoint received from the \"\n \"target in the HTC service connect response). \"\n \"If this option is omitted a default value of 1 \"\n \"will be used.\")\n parser_all = subparsers.add_parser('all',\n help=all_help,\n description=all_description,\n parents=[base_parser])\n parser_all.add_argument('-p', '--print-data', action=\"store_true\",\n help=\"Print message payload (and not just \"\n \"message ID) for all encountered messages. \")\n parser_all.add_argument('--wmi-old', action=\"store_true\",\n help=\"Specifies whether or not the WMI messages \"\n \"are according to the \\\"old\\\" WMI protocol. \"\n \"If not set, the messages will be interpreted \"\n \"according to the unified WMI format\")\n parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1,\n type=int, default=[1],\n help=\"HTT service endpoint ID. \"\n \"This is the endpoint where the HTT data is \"\n \"expected to be present. Make sure the endpoint \"\n \"matches the endpoint id associated with the \"\n \"HTT endpoint (service id 0x300) \"\n \"of the driver (the endpoint received from the \"\n \"target in the HTC service connect response). \"\n \"If this option is omitted a default value of 1 \"\n \"will be used.\")\n parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1,\n type=int, default=[2],\n help=\"WMI control service endpoint ID. \"\n \"This is the endpoint where the WMI control data is \"\n \"expected to be present. Make sure the endpoint \"\n \"matches the endpoint id associated with the \"\n \"control service endpoint (service id 0x100) \"\n \"of the driver (the endpoint received from the \"\n \"target in the HTC service connect response). 
\"\n \"If this option is omitted a default value of 2 \"\n \"will be used.\")\n parsed_args = parser.parse_args()\n\n\ndef main():\n global parsed_args\n load_options()\n\n try:\n if parsed_args.input_file:\n infp = open(parsed_args.input_file, \"r\")\n else:\n infp = sys.stdin\n if parsed_args.output_file:\n outfp = open(parsed_args.output_file, \"w\")\n else:\n outfp = sys.stdout\n\n if parsed_args.data_direction:\n if parsed_args.data_direction[0] == 't2h':\n t2h = True\n elif parsed_args.data_direction[0] == 'h2t':\n t2h = False\n else:\n sys.stderr.write('Unsupported data direction: {}\\n'.format(parsed_args.data_direction[0]))\n exit(1)\n else:\n # Interpret the data as host -> target is the default behaviour\n t2h = False\n\n hf = hexfilter.HexFilterLinux(skip_timestamps=(not parsed_args.keep_timestamps),\n abs_timestamps=True,\n dump_desc=parsed_args.desc_str,\n dump_desc_invert=parsed_args.desc_str_invert,\n log_has_timestamps=(not parsed_args.no_timestamps),\n include_dump_desc_in_output=False,\n remove_ascii_part=True)\n\n if parsed_args.subparser_name == 'wmi-ctrl':\n analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],\n wmi_unified=(not parsed_args.wmi_old),\n short_htc_hdr=parsed_args.short_htc_header,\n timestamps=parsed_args.keep_timestamps,\n t2h=t2h,\n tlv_analysis=parsed_args.tlv,\n msg_id_filter=parsed_args.id,\n msg_id_exclude_filter=parsed_args.skip_id)\n if parsed_args.tlv:\n parsed_args.print_data = True\n elif parsed_args.subparser_name == 'htc-ctrl':\n analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.short_htc_header,\n timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n elif parsed_args.subparser_name == 'htt':\n analyzer = HttAnalyzer(eid=parsed_args.ep_id[0],\n short_htc_hdr=parsed_args.short_htc_header,\n timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n elif parsed_args.subparser_name == 'all':\n analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[0],\n htt_eid=parsed_args.htt_ep_id[0],\n wmi_unified=(not 
parsed_args.wmi_old),\n short_htc_hdr=parsed_args.short_htc_header,\n timestamps=parsed_args.keep_timestamps,\n t2h=t2h)\n else:\n sys.stderr.write('Unsupported subcommand: {}\\n'.format(parsed_args.subparser_name))\n\n for line in infp:\n if hf.parse_line(line):\n hexdata = hf.get_hex()\n if analyzer.parse_hexdata(hexdata):\n str = analyzer.get_id_str()\n outfp.write(str)\n if parsed_args.print_data:\n analyzer.print_data(outfp)\n\n except IOError as err:\n sys.stderr.write('{}\\n'.format(err))\n except:\n type, value, tb = sys.exc_info()\n traceback.print_exc()\n pdb.post_mortem(tb)\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Elbow-method sweep: fit KMeans for k = 1..10 and record the cluster
# inertia (sum of squared distances of samples to their nearest centroid)
# for each k.
# NOTE(review): `arr` and `distortions` are defined in an elided section
# above -- presumably the data array and an empty list; confirm.
for i in range(1, 11):
    km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=
        0.0001, random_state=0)
    km.fit(arr)
    distortions.append(km.inertia_)
<|reserved_special_token_0|>
# Report the elbow (cluster count) detected by KneeLocator; `kn` is
# constructed in an elided section above.
print('The number of clusters are: ' + str(kn.knee))
# Plot the inertia curve with the detected knee marked by a dashed line.
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
<|reserved_special_token_0|>
# Scatter-plot each cluster with its own colour/marker, then the centroids.
# NOTE(review): the loop is hard-coded to 3 clusters even though the model
# was fit with kn.knee clusters and 10 colour/marker pairs exist; it should
# likely iterate range(kn.knee) -- confirm.
for i in range(0, 3):
    plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],
        marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))
plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,
    marker='*', c='red', edgecolor='black', label='centroids')
plt.legend(scatterpoints=1)
plt.grid()
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Load the two feature columns from the CSV into a (n_samples, 2) array.
df = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])
arr = df.to_numpy()

# Elbow-method sweep: record the cluster inertia (sum of squared distances
# of samples to their nearest centroid) for k = 1..10.
distortions = []
for k in range(1, 11):
    km = KMeans(n_clusters=k, init='random', n_init=10, max_iter=300,
                tol=0.0001, random_state=0)
    km.fit(arr)
    distortions.append(km.inertia_)

# Detect the "elbow" of the inertia curve; kn.knee is the chosen cluster
# count (KneeLocator can return None when no knee is found, in which case
# the KMeans fit below would fail anyway).
kn = KneeLocator(range(1, 11), distortions, curve='convex',
                 direction='decreasing')
print('The number of clusters are: ' + str(kn.knee))

# Elbow plot with the detected knee marked by a dashed vertical line.
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()

# Final clustering with the detected number of clusters.
km = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300,
            tol=0.0001, random_state=0)
y_km = km.fit_predict(arr)

# Up to 10 distinct colour/marker pairs -- one per possible cluster.
colors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',
          'lightpink', 'black', 'gold', 'coral', 'navy']
markers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']

# Bug fix: the original loop was hard-coded to range(0, 3) and therefore
# drew only the first three clusters; iterate over all kn.knee detected
# clusters instead.
for i in range(kn.knee):
    plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],
                marker=markers[i], edgecolor='black',
                label='cluster ' + str(i + 1))
plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,
            marker='*', c='red', edgecolor='black', label='centroids')
plt.legend(scatterpoints=1)
plt.grid()
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from kneed import KneeLocator

# Read the two coordinate columns and convert to a NumPy array.
df = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])
arr = df.to_numpy()

# Inertia (sum of squared distances to the nearest centroid) for each
# candidate cluster count k = 1..10; used to locate the "elbow".
distortions = []
for i in range(1, 11):
    km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300,
        tol=0.0001, random_state=0)
    km.fit(arr)
    distortions.append(km.inertia_)

# The elbow ("knee") of the inertia curve picks the cluster count.
kn = KneeLocator(range(1, 11), distortions, curve='convex',
    direction='decreasing')
print('The number of clusters are: ' + str(kn.knee))

# Elbow plot with the detected knee marked by a dashed vertical line.
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()

# Final K-Means fit using the detected number of clusters.
km = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300,
    tol=0.0001, random_state=0)
y_km = km.fit_predict(arr)

# Up to ten clusters can be drawn with distinct colors/markers.
colors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',
    'lightpink', 'black', 'gold', 'coral', 'navy']
markers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']
# Fix: plot every detected cluster instead of a hard-coded first three,
# so the scatter plot always matches the fitted model.
for i in range(kn.knee):
    plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],
        marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))
# Mark the cluster centroids.
plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,
    marker='*', c='red', edgecolor='black', label='centroids')
plt.legend(scatterpoints=1)
plt.grid()
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from kneed import KneeLocator

# Create a pandas data frame from the csv file, keeping only the two
# coordinate columns, then convert it to a NumPy array for scikit-learn.
df = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])
arr = df.to_numpy()

# Code used to visualise the data and check if the import worked correctly.
# Now commented out but retained for debugging.
# plt.scatter(arr[:, 0], arr[:, 1], label='True Position')
# plt.show()

# Sum of squared distances to the closest centroid (cluster inertia) for
# multiple runs of K-Means with k = 1..10 assumed clusters.
distortions = []
for i in range(1, 11):
    km = KMeans(n_clusters=i, init='random',
                n_init=10, max_iter=300,
                tol=1e-04, random_state=0)
    km.fit(arr)
    distortions.append(km.inertia_)

# Find the elbow (knee) of the clusters-vs-distortion curve; the knee
# supplies the cluster count for the main K-Means run below.
kn = KneeLocator(range(1, 11), distortions, curve='convex', direction='decreasing')
print('The number of clusters are: ' + str(kn.knee))

# Plot the clusters-vs-distortion graph and annotate the elbow point.
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()

# Final K-Means fit with the detected cluster count.
km = KMeans(
    n_clusters=kn.knee, init='random',
    n_init=10, max_iter=300,
    tol=1e-04, random_state=0
)
# Cluster labels for every sample.
y_km = km.fit_predict(arr)

# Colors and markers for up to ten clusters.
colors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',
          'lightpink', 'black', 'gold', 'coral', 'navy']
markers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']

# Plot the clusters.
# Fix: iterate over the detected number of clusters instead of the
# previous hard-coded range(0, 3), so the plot matches the actual fit.
for i in range(kn.knee):
    plt.scatter(
        arr[y_km == i, 0], arr[y_km == i, 1],
        s=50, c=colors[i],
        marker=markers[i], edgecolor='black',
        label='cluster ' + str(i + 1)
    )

# Plot the centroids for all the clusters.
plt.scatter(
    km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
    s=250, marker='*',
    c='red', edgecolor='black',
    label='centroids'
)

plt.legend(scatterpoints=1)
plt.grid()
plt.show()
|
flexible
|
{
"blob_id": "09417014963172fc71b4268aafdec1405c04f34d",
"index": 3472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\n<mask token>\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\n<mask token>\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-3": "<mask token>\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])\narr = df.to_numpy()\ndistortions = []\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\nkn = KneeLocator(range(1, 11), distortions, curve='convex', direction=\n 'decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\nkm = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300, tol\n =0.0001, random_state=0)\ny_km = km.fit_predict(arr)\ncolors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',\n 'lightpink', 'black', 'gold', 'coral', 'navy']\nmarkers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom kneed import KneeLocator\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])\narr = df.to_numpy()\ndistortions = []\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\nkn = KneeLocator(range(1, 11), distortions, curve='convex', direction=\n 'decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\nkm = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300, tol\n =0.0001, random_state=0)\ny_km = km.fit_predict(arr)\ncolors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',\n 'lightpink', 'black', 'gold', 'coral', 'navy']\nmarkers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom kneed import KneeLocator\n\n#Create a panda data frame from the csv file\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1','V2'])\n\n#Convert the panda data frame to a NumPy array\narr = df.to_numpy()\n\n#Code used to visualise the data and check if the import worked correctly\n#Now commented out but retained for debugging.\n#plt.scatter(arr[:,0],arr[:,1], label='True Position')\n#plt.show()\n\n# Create an array to store the Sum of Squared Errors or the cluster inertia\n# for the k-clusters in multiple runs of the K-Means algo with different\n# number of clusters assumed\n\ndistortions = []\n\nfor i in range(1,11):\n km = KMeans(n_clusters=i, init='random',\n n_init=10, max_iter=300,\n tol=1e-04, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\n\n# Find the elbow or knee from the plot of no. of clusters vs distortion for that\n# number. This algorithm locates the knee and that is used to provide the Number\n# of clusters to the main run of K-means algo.\n\nkn = KneeLocator(range(1,11), distortions, curve='convex', direction='decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\n\n#plot the no. 
of clusters vs distortion graph and annotate the elbow point\n\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\n\n\n\n#From the sciKitLearn clustering algorithms, the K-means clustering\n#algorithm is used.\nkm = KMeans(\n n_clusters=kn.knee, init='random',\n n_init=10, max_iter=300,\n tol=1e-04, random_state=0\n)\n\n#Obtain the cluster labels by running the K-means algorithm with\n# the parameters defined above.\ny_km = km.fit_predict(arr)\n\n#Color Array\ncolors = ['lightgreen','orange','lightblue','azure', 'crimson','lightpink','black','gold', 'coral', 'navy']\n\n#Marker Array\nmarkers = ['s','o','v', '^', '<', '>', 'h', 'H', 'D', 'd']\n\n#Plot the clusters.\nfor i in range(0, 3):\n plt.scatter(\n arr[y_km == i, 0], arr[y_km == i, 1],\n s=50, c=colors[i],\n marker=markers[i], edgecolor='black',\n label='cluster ' + str(i+1)\n)\n\n# Plotting the centroids for all the clusters.\nplt.scatter(\n km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],\n s=250, marker='*',\n c='red', edgecolor='black',\n label='centroids'\n)\n\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class CategoryView(View):
    """Category listing page for articles."""

    def get(self, request, category_id):
        category = Category.objects.get(id=int(category_id))
        # All articles in this category (reverse foreign-key lookup).
        category_articles = category.article_set.all()
        new_articles = category_articles.order_by('-modified_time')
        # Five most-viewed and six random articles for the sidebars.
        category_hot_articles = category_articles.order_by('-views')[0:5]
        category_guide_articles = category_articles.order_by('?')[0:6]
        # NOTE(review): this count is never used and issues an extra query.
        category_articles_nums = category_articles.count()
        # Fall back to page 1 when the ``page`` GET parameter is invalid.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Paginate the newest-first listing, ten articles per page.
        p = Paginator(new_articles, 10, request=request)
        category_all_articles = p.page(page)
        return render(request, 'category.html', {'category': category,
            'category_all_articles': category_all_articles,
            'category_hot_articles': category_hot_articles,
            'category_guide_articles': category_guide_articles})
class A_listView(View):
    """Article list page."""

    def get(self, request):
        # Ten most-viewed and 26 most recently modified articles.
        hot_articles = Article.objects.all().order_by('-views')[0:10]
        guide_articles = Article.objects.order_by('-modified_time')[0:26]
        return render(request, 'a_list.html', {'hot_articles': hot_articles,
            'guide_articles': guide_articles})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArticleView(View):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CategoryView(View):
"""文章分类页"""
def get(self, request, category_id):
category = Category.objects.get(id=int(category_id))
category_articles = category.article_set.all()
new_articles = category_articles.order_by('-modified_time')
category_hot_articles = category_articles.order_by('-views')[0:5]
category_guide_articles = category_articles.order_by('?')[0:6]
category_articles_nums = category_articles.count()
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(new_articles, 10, request=request)
category_all_articles = p.page(page)
return render(request, 'category.html', {'category': category,
'category_all_articles': category_all_articles,
'category_hot_articles': category_hot_articles,
'category_guide_articles': category_guide_articles})
class A_listView(View):
"""文章列表"""
def get(self, request):
hot_articles = Article.objects.all().order_by('-views')[0:10]
guide_articles = Article.objects.order_by('-modified_time')[0:26]
return render(request, 'a_list.html', {'hot_articles': hot_articles,
'guide_articles': guide_articles})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArticleView(View):
<|reserved_special_token_0|>
def get(self, request, article_id):
article = Article.objects.get(id=int(article_id))
article.views += 1
article.save()
previous_article = Article.objects.filter(created_time__gt=article.
created_time, category=article.category.id).first()
next_article = Article.objects.filter(created_time__lt=article.
created_time, category=article.category.id).last()
tags = article.tags.all()
relate_articles = Article.objects.all().order_by('?')[0:10]
guide_articles = Article.objects.prefetch_related('tags').order_by('?'
)[:5]
hot_articles = Article.objects.all().order_by('-views')[0:6]
return render(request, 'article.html', {'article': article,
'previous_article': previous_article, 'next_article':
next_article, 'relate_articles': relate_articles,
'guide_articles': guide_articles, 'hot_articles': hot_articles,
'tags': tags})
class CategoryView(View):
"""文章分类页"""
def get(self, request, category_id):
category = Category.objects.get(id=int(category_id))
category_articles = category.article_set.all()
new_articles = category_articles.order_by('-modified_time')
category_hot_articles = category_articles.order_by('-views')[0:5]
category_guide_articles = category_articles.order_by('?')[0:6]
category_articles_nums = category_articles.count()
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(new_articles, 10, request=request)
category_all_articles = p.page(page)
return render(request, 'category.html', {'category': category,
'category_all_articles': category_all_articles,
'category_hot_articles': category_hot_articles,
'category_guide_articles': category_guide_articles})
class A_listView(View):
"""文章列表"""
def get(self, request):
hot_articles = Article.objects.all().order_by('-views')[0:10]
guide_articles = Article.objects.order_by('-modified_time')[0:26]
return render(request, 'a_list.html', {'hot_articles': hot_articles,
'guide_articles': guide_articles})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArticleView(View):
"""文章详情页"""
def get(self, request, article_id):
article = Article.objects.get(id=int(article_id))
article.views += 1
article.save()
previous_article = Article.objects.filter(created_time__gt=article.
created_time, category=article.category.id).first()
next_article = Article.objects.filter(created_time__lt=article.
created_time, category=article.category.id).last()
tags = article.tags.all()
relate_articles = Article.objects.all().order_by('?')[0:10]
guide_articles = Article.objects.prefetch_related('tags').order_by('?'
)[:5]
hot_articles = Article.objects.all().order_by('-views')[0:6]
return render(request, 'article.html', {'article': article,
'previous_article': previous_article, 'next_article':
next_article, 'relate_articles': relate_articles,
'guide_articles': guide_articles, 'hot_articles': hot_articles,
'tags': tags})
class CategoryView(View):
"""文章分类页"""
def get(self, request, category_id):
category = Category.objects.get(id=int(category_id))
category_articles = category.article_set.all()
new_articles = category_articles.order_by('-modified_time')
category_hot_articles = category_articles.order_by('-views')[0:5]
category_guide_articles = category_articles.order_by('?')[0:6]
category_articles_nums = category_articles.count()
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(new_articles, 10, request=request)
category_all_articles = p.page(page)
return render(request, 'category.html', {'category': category,
'category_all_articles': category_all_articles,
'category_hot_articles': category_hot_articles,
'category_guide_articles': category_guide_articles})
class A_listView(View):
"""文章列表"""
def get(self, request):
hot_articles = Article.objects.all().order_by('-views')[0:10]
guide_articles = Article.objects.order_by('-modified_time')[0:26]
return render(request, 'a_list.html', {'hot_articles': hot_articles,
'guide_articles': guide_articles})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.views.generic.base import View
from .models import Article, Tag, Category
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
class ArticleView(View):
    '''Article detail page.'''

    def get(self, request, article_id):
        # Look up the article and count this request as a view.
        article = Article.objects.get(id=int(article_id))
        article.views += 1
        article.save()
        # Neighbouring articles (by creation time) within the same category.
        previous_article = Article.objects.filter(created_time__gt=article.created_time,
                                                  category=article.category.id).first()
        next_article = Article.objects.filter(created_time__lt=article.created_time,
                                              category=article.category.id).last()
        # All tags attached to this article.
        tags = article.tags.all()
        # Ten random articles shown as "related".
        relate_articles = Article.objects.all().order_by('?')[0:10]
        # Travel guides; tags is a many-to-many relation, hence prefetch_related.
        guide_articles = Article.objects.prefetch_related('tags').order_by('?')[:5]
        # Six most-viewed articles for the sidebar.
        hot_articles = Article.objects.all().order_by('-views')[0:6]
        return render(request, 'article.html', {
            'article': article,
            'previous_article': previous_article,
            'next_article': next_article,
            'relate_articles': relate_articles,
            'guide_articles': guide_articles,
            'hot_articles': hot_articles,
            'tags': tags
        })
class CategoryView(View):
    '''Category listing page for articles.'''

    def get(self, request, category_id):
        category = Category.objects.get(id=int(category_id))
        # All articles in this category (reverse foreign-key lookup).
        category_articles = category.article_set.all()
        new_articles = category_articles.order_by('-modified_time')
        # Five most-viewed and six random articles for the sidebars.
        category_hot_articles = category_articles.order_by('-views')[0:5]
        category_guide_articles = category_articles.order_by('?')[0:6]
        # Removed the unused ``category_articles_nums = category_articles.count()``
        # — its value was never read and it issued an extra database query.
        # Fall back to page 1 when the ``page`` GET parameter is invalid.
        # NOTE(review): request.GET.get itself never raises PageNotAnInteger;
        # the guard only fires if page validation is moved here — confirm intent.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Paginate the newest-first listing, ten articles per page.
        p = Paginator(new_articles, 10, request=request)
        category_all_articles = p.page(page)

        return render(request, 'category.html', {
            'category': category,
            'category_all_articles': category_all_articles,
            'category_hot_articles': category_hot_articles,
            'category_guide_articles': category_guide_articles,
        })
class A_listView(View):
    '''Article list page.'''

    def get(self, request):
        # Assemble the template context directly: the ten most-viewed
        # articles and the 26 most recently modified ones.
        context = {
            'hot_articles': Article.objects.all().order_by('-views')[0:10],
            'guide_articles': Article.objects.order_by('-modified_time')[0:26],
        }
        return render(request, 'a_list.html', context)
|
flexible
|
{
"blob_id": "2fd40f4d69223933d53d8ed2abd5f6d3ccd2f509",
"index": 3850,
"step-1": "<mask token>\n\n\nclass CategoryView(View):\n \"\"\"文章分类页\"\"\"\n\n def get(self, request, category_id):\n category = Category.objects.get(id=int(category_id))\n category_articles = category.article_set.all()\n new_articles = category_articles.order_by('-modified_time')\n category_hot_articles = category_articles.order_by('-views')[0:5]\n category_guide_articles = category_articles.order_by('?')[0:6]\n category_articles_nums = category_articles.count()\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n p = Paginator(new_articles, 10, request=request)\n category_all_articles = p.page(page)\n return render(request, 'category.html', {'category': category,\n 'category_all_articles': category_all_articles,\n 'category_hot_articles': category_hot_articles,\n 'category_guide_articles': category_guide_articles})\n\n\nclass A_listView(View):\n \"\"\"文章列表\"\"\"\n\n def get(self, request):\n hot_articles = Article.objects.all().order_by('-views')[0:10]\n guide_articles = Article.objects.order_by('-modified_time')[0:26]\n return render(request, 'a_list.html', {'hot_articles': hot_articles,\n 'guide_articles': guide_articles})\n",
"step-2": "<mask token>\n\n\nclass ArticleView(View):\n <mask token>\n <mask token>\n\n\nclass CategoryView(View):\n \"\"\"文章分类页\"\"\"\n\n def get(self, request, category_id):\n category = Category.objects.get(id=int(category_id))\n category_articles = category.article_set.all()\n new_articles = category_articles.order_by('-modified_time')\n category_hot_articles = category_articles.order_by('-views')[0:5]\n category_guide_articles = category_articles.order_by('?')[0:6]\n category_articles_nums = category_articles.count()\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n p = Paginator(new_articles, 10, request=request)\n category_all_articles = p.page(page)\n return render(request, 'category.html', {'category': category,\n 'category_all_articles': category_all_articles,\n 'category_hot_articles': category_hot_articles,\n 'category_guide_articles': category_guide_articles})\n\n\nclass A_listView(View):\n \"\"\"文章列表\"\"\"\n\n def get(self, request):\n hot_articles = Article.objects.all().order_by('-views')[0:10]\n guide_articles = Article.objects.order_by('-modified_time')[0:26]\n return render(request, 'a_list.html', {'hot_articles': hot_articles,\n 'guide_articles': guide_articles})\n",
"step-3": "<mask token>\n\n\nclass ArticleView(View):\n <mask token>\n\n def get(self, request, article_id):\n article = Article.objects.get(id=int(article_id))\n article.views += 1\n article.save()\n previous_article = Article.objects.filter(created_time__gt=article.\n created_time, category=article.category.id).first()\n next_article = Article.objects.filter(created_time__lt=article.\n created_time, category=article.category.id).last()\n tags = article.tags.all()\n relate_articles = Article.objects.all().order_by('?')[0:10]\n guide_articles = Article.objects.prefetch_related('tags').order_by('?'\n )[:5]\n hot_articles = Article.objects.all().order_by('-views')[0:6]\n return render(request, 'article.html', {'article': article,\n 'previous_article': previous_article, 'next_article':\n next_article, 'relate_articles': relate_articles,\n 'guide_articles': guide_articles, 'hot_articles': hot_articles,\n 'tags': tags})\n\n\nclass CategoryView(View):\n \"\"\"文章分类页\"\"\"\n\n def get(self, request, category_id):\n category = Category.objects.get(id=int(category_id))\n category_articles = category.article_set.all()\n new_articles = category_articles.order_by('-modified_time')\n category_hot_articles = category_articles.order_by('-views')[0:5]\n category_guide_articles = category_articles.order_by('?')[0:6]\n category_articles_nums = category_articles.count()\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n p = Paginator(new_articles, 10, request=request)\n category_all_articles = p.page(page)\n return render(request, 'category.html', {'category': category,\n 'category_all_articles': category_all_articles,\n 'category_hot_articles': category_hot_articles,\n 'category_guide_articles': category_guide_articles})\n\n\nclass A_listView(View):\n \"\"\"文章列表\"\"\"\n\n def get(self, request):\n hot_articles = Article.objects.all().order_by('-views')[0:10]\n guide_articles = Article.objects.order_by('-modified_time')[0:26]\n return render(request, 
'a_list.html', {'hot_articles': hot_articles,\n 'guide_articles': guide_articles})\n",
"step-4": "<mask token>\n\n\nclass ArticleView(View):\n \"\"\"文章详情页\"\"\"\n\n def get(self, request, article_id):\n article = Article.objects.get(id=int(article_id))\n article.views += 1\n article.save()\n previous_article = Article.objects.filter(created_time__gt=article.\n created_time, category=article.category.id).first()\n next_article = Article.objects.filter(created_time__lt=article.\n created_time, category=article.category.id).last()\n tags = article.tags.all()\n relate_articles = Article.objects.all().order_by('?')[0:10]\n guide_articles = Article.objects.prefetch_related('tags').order_by('?'\n )[:5]\n hot_articles = Article.objects.all().order_by('-views')[0:6]\n return render(request, 'article.html', {'article': article,\n 'previous_article': previous_article, 'next_article':\n next_article, 'relate_articles': relate_articles,\n 'guide_articles': guide_articles, 'hot_articles': hot_articles,\n 'tags': tags})\n\n\nclass CategoryView(View):\n \"\"\"文章分类页\"\"\"\n\n def get(self, request, category_id):\n category = Category.objects.get(id=int(category_id))\n category_articles = category.article_set.all()\n new_articles = category_articles.order_by('-modified_time')\n category_hot_articles = category_articles.order_by('-views')[0:5]\n category_guide_articles = category_articles.order_by('?')[0:6]\n category_articles_nums = category_articles.count()\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n p = Paginator(new_articles, 10, request=request)\n category_all_articles = p.page(page)\n return render(request, 'category.html', {'category': category,\n 'category_all_articles': category_all_articles,\n 'category_hot_articles': category_hot_articles,\n 'category_guide_articles': category_guide_articles})\n\n\nclass A_listView(View):\n \"\"\"文章列表\"\"\"\n\n def get(self, request):\n hot_articles = Article.objects.all().order_by('-views')[0:10]\n guide_articles = Article.objects.order_by('-modified_time')[0:26]\n return 
render(request, 'a_list.html', {'hot_articles': hot_articles,\n 'guide_articles': guide_articles})\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic.base import View\nfrom .models import Article, Tag, Category\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\n\n\nclass ArticleView(View):\n '''文章详情页'''\n\n def get(self, request, article_id):\n # 文章详情\n article = Article.objects.get(id=int(article_id))\n article.views += 1\n article.save()\n previous_article = Article.objects.filter(created_time__gt=article.created_time,\n category=article.category.id).first()\n next_article = Article.objects.filter(created_time__lt=article.created_time,\n category=article.category.id).last()\n # 取出文章对应标签所有标签\n tags = article.tags.all()\n relate_articles = Article.objects.all().order_by('?')[0:10]\n # 旅游指南是多对多查询\n guide_articles = Article.objects.prefetch_related('tags').order_by('?')[:5]\n hot_articles = Article.objects.all().order_by('-views')[0:6]\n return render(request, 'article.html', {\n 'article': article,\n 'previous_article': previous_article,\n 'next_article': next_article,\n 'relate_articles': relate_articles,\n 'guide_articles': guide_articles,\n 'hot_articles': hot_articles,\n 'tags': tags\n })\n\n\nclass CategoryView(View):\n '''文章分类页'''\n\n def get(self, request, category_id):\n category = Category.objects.get(id=int(category_id))\n category_articles = category.article_set.all()\n new_articles = category_articles.order_by('-modified_time')\n category_hot_articles = category_articles.order_by('-views')[0:5]\n category_guide_articles = category_articles.order_by('?')[0:6]\n category_articles_nums = category_articles.count()\n # 对新闻进行分页\n # 尝试获取前台get请求传递过来的page参数\n # 如果是不合法的配置参数默认返回第一页\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n # 这里指从category_articles中取10个出来,每页显示10个\n p = Paginator(new_articles, 10, request=request)\n category_all_articles = p.page(page)\n\n return render(request, 'category.html', {\n 'category': category,\n 'category_all_articles': category_all_articles,\n 
'category_hot_articles': category_hot_articles,\n 'category_guide_articles': category_guide_articles,\n })\n\n\nclass A_listView(View):\n '''文章列表'''\n\n def get(self, request):\n hot_articles = Article.objects.all().order_by('-views')[0:10]\n guide_articles = Article.objects.order_by('-modified_time')[0:26]\n return render(request, 'a_list.html', {\n 'hot_articles': hot_articles,\n 'guide_articles': guide_articles,\n })\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:

    def rotateRight(self, head: ListNode, k: int) ->ListNode:
        """Rotate the linked list right by k places; return the new head."""
        # Nothing to do for an empty list, a single node, or k == 0.
        if head is None or head.next is None or k == 0:
            return head
        # Count the nodes.
        tmp, length = head, 1
        while tmp.next:
            tmp = tmp.next
            length += 1
        # Rotating by a multiple of the length is a no-op.
        k = k % length
        if k == 0:
            return head
        # Two pointers k nodes apart: when fast reaches the old tail,
        # slow sits just before the new head.
        fast = slow = head
        for _ in range(k):
            fast = fast.next
        while fast.next:
            fast = fast.next
            slow = slow.next
        # Cut before the new head and splice the old head after the old tail.
        res = slow.next
        slow.next = None
        fast.next = head
        return res
<|reserved_special_token_1|>
# Runtime: 44 ms, faster than 62.95% of Python3 online submissions for Rotate List.
# Memory Usage: 13.9 MB, less than 6.05% of Python3 online submissions for Rotate List.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def rotateRight(self, head: ListNode, k: int) -> ListNode:
        """Rotate the linked list right by k places; return the new head."""
        if head is None or head.next is None or k == 0:
            return head
        # Walk to the tail while counting the nodes.
        tail, length = head, 1
        while tail.next:
            tail = tail.next
            length += 1
        k %= length
        if k == 0:  # rotating by a multiple of the length is a no-op
            return head
        # Close the list into a ring, then cut it just before the new head,
        # which sits length - k nodes after the old head.
        tail.next = head
        new_tail = head
        for _ in range(length - k - 1):
            new_tail = new_tail.next
        new_head = new_tail.next
        new_tail.next = None
        return new_head
|
flexible
|
{
"blob_id": "a79c9799ed237a943ae3d249a4d66eb2f8693e83",
"index": 1896,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def rotateRight(self, head: ListNode, k: int) ->ListNode:\n if head is None or head.next is None or k == 0:\n return head\n tmp, length = head, 1\n while tmp.next:\n tmp = tmp.next\n length += 1\n k = k % length\n if k == 0:\n return head\n fast = slow = head\n for _ in range(k):\n fast = fast.next\n while fast.next:\n fast = fast.next\n slow = slow.next\n res = slow.next\n slow.next = None\n fast.next = head\n return res\n",
"step-4": "# Runtime: 44 ms, faster than 62.95% of Python3 online submissions for Rotate List.\r\n# Memory Usage: 13.9 MB, less than 6.05% of Python3 online submissions for Rotate List.\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\r\n if head is None or head.next is None or k == 0:\r\n return head\r\n tmp, length = head, 1\r\n while tmp.next:\r\n tmp = tmp.next\r\n length += 1\r\n k = k % length\r\n if k == 0: # don't need rotate\r\n return head\r\n fast = slow = head # fast and slow point\r\n for _ in range(k):\r\n fast = fast.next\r\n while fast.next:\r\n fast = fast.next\r\n slow = slow.next\r\n res = slow.next # ready result\r\n slow.next = None\r\n fast.next = head\r\n return res\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'
] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'
self.config['SQLALCHEMY_DATABASE_URI'
] = 'pgfdw://hydra@localhost/hydra'
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
<|reserved_special_token_0|>
@product_api.declare('GET')
def get_product(payload, product_id):
products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id
=app.config['CLIENT_ID']).all()
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'
] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'
self.config['SQLALCHEMY_DATABASE_URI'
] = 'pgfdw://hydra@localhost/hydra'
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
def filter_query(query):
return query.filter_by(client_id=app.config['CLIENT_ID'])
<|reserved_special_token_0|>
rest(Labo, only=('label',))
<|reserved_special_token_0|>
@image_api.declare('GET')
def get_image(payload, cip, name, ext):
result = image_api.get(payload, cip=cip)
for obj in getattr(result, 'data', result)['objects']:
obj['name'] = quote(obj['name'])
obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)
return result
@product_api.declare('GET')
def get_product(payload, product_id):
products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id
=app.config['CLIENT_ID']).all()
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'
] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'
self.config['SQLALCHEMY_DATABASE_URI'
] = 'pgfdw://hydra@localhost/hydra'
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
def filter_query(query):
return query.filter_by(client_id=app.config['CLIENT_ID'])
app = Hydra(__name__)
rest = UnRest(app, db.session)
rest(Labo, only=('label',))
product_api = rest(Product, query=filter_query, only=('product_id', 'title',
'description', 'cip', 'resip_labo_code', 'type_product'))
image_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))
@image_api.declare('GET')
def get_image(payload, cip, name, ext):
result = image_api.get(payload, cip=cip)
for obj in getattr(result, 'data', result)['objects']:
obj['name'] = quote(obj['name'])
obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)
return result
@product_api.declare('GET')
def get_product(payload, product_id):
products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id
=app.config['CLIENT_ID']).all()
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
<|reserved_special_token_1|>
from urllib.parse import quote
from top_model import db
from top_model.ext.flask import FlaskTopModel
from top_model.filesystem import ProductPhotoCIP
from top_model.webstore import Product, Labo
from unrest import UnRest
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'
] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'
self.config['SQLALCHEMY_DATABASE_URI'
] = 'pgfdw://hydra@localhost/hydra'
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
def filter_query(query):
return query.filter_by(client_id=app.config['CLIENT_ID'])
app = Hydra(__name__)
rest = UnRest(app, db.session)
rest(Labo, only=('label',))
product_api = rest(Product, query=filter_query, only=('product_id', 'title',
'description', 'cip', 'resip_labo_code', 'type_product'))
image_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))
@image_api.declare('GET')
def get_image(payload, cip, name, ext):
result = image_api.get(payload, cip=cip)
for obj in getattr(result, 'data', result)['objects']:
obj['name'] = quote(obj['name'])
obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)
return result
@product_api.declare('GET')
def get_product(payload, product_id):
products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id
=app.config['CLIENT_ID']).all()
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
<|reserved_special_token_1|>
from urllib.parse import quote
from top_model import db
from top_model.ext.flask import FlaskTopModel
from top_model.filesystem import ProductPhotoCIP
from top_model.webstore import Product, Labo
from unrest import UnRest
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'] = (
'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')
self.config['SQLALCHEMY_DATABASE_URI'] = (
'pgfdw://hydra@localhost/hydra')
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
def filter_query(query):
return query.filter_by(client_id=app.config['CLIENT_ID'])
app = Hydra(__name__)
rest = UnRest(app, db.session)
rest(Labo, only=('label',))
product_api = rest(Product, query=filter_query, only=(
'product_id', 'title', 'description', 'cip', 'resip_labo_code',
'type_product'))
image_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))
@image_api.declare('GET')
def get_image(payload, cip, name, ext):
result = image_api.get(payload, cip=cip)
for obj in getattr(result, 'data', result)['objects']:
obj['name'] = quote(obj['name'])
obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)
return result
@product_api.declare('GET')
def get_product(payload, product_id):
products = (
Product.query
.filter_by(cip=str(product_id))
.filter_by(client_id=app.config['CLIENT_ID'])
.all())
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
|
flexible
|
{
"blob_id": "de3a4053b5b0d4d2d5c2dcd317e64cf9b4faeb75",
"index": 562,
"step-1": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\n<mask token>\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-2": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\n<mask token>\nrest(Labo, only=('label',))\n<mask token>\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-3": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-4": "from urllib.parse import quote\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-5": "from urllib.parse import quote\n\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'] = (\n 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')\n self.config['SQLALCHEMY_DATABASE_URI'] = (\n 'pgfdw://hydra@localhost/hydra')\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\n\n\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=(\n 'product_id', 'title', 'description', 'cip', 'resip_labo_code',\n 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = (\n Product.query\n .filter_by(cip=str(product_id))\n .filter_by(client_id=app.config['CLIENT_ID'])\n .all())\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import os
import time
import glob
import torch
import random
import signal
import argparse
from models.trainer import build_trainer
from models import data_loader, model_builder
from models.pytorch_pretrained_bert.modeling import BertConfig
from utils import distributed
from utils.logging import logger, init_logger
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
class MultiRunning(object):
def __init__(self, args, device_id):
self.args = args
self.device_id = device_id
def multi_card_run(self):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = self.args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
process = []
for i in range(nb_gpu):
self.device_id = i
process.append(mp.Process(target=self.multi_card_train, args=(self.args, self.device_id, error_queue),
daemon=True))
process[i].start()
logger.info(" Starting process pid: %d " % process[i].pid)
error_handler.add_child(process[i].pid)
for p in process:
p.join()
def multi_card_train(self, error_queue):
""" run process """
setattr(self.args, 'gpu_ranks', [int(i) for i in self.args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(self.device_id, self.args.world_size, self.args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != self.args.gpu_ranks[self.device_id]:
raise AssertionError("An error occurred in Distributed initialization")
runner = Running(self.args, self.device_id)
runner.train()
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((self.args.gpu_ranks[self.device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
class Running(object):
"""Run Model"""
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval',
'rnn_size']
self.device = "cpu" if self.args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % self.device_id)
logger.info(f'Device {self.device}')
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
def baseline(self, cal_lead=False, cal_oracle=False):
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, None, None)
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
def train_iter(self):
return data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'train', shuffle=True),
self.args.batch_size, self.device, shuffle=True, is_test=False)
def train(self):
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=True)
if self.args.train_from:
logger.info(f'Loading checkpoint from {self.args.train_from}')
checkpoint = torch.load(self.args.train_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
model.load_cp(checkpoint)
optimizer = model_builder.build_optim(self.args, model, checkpoint)
else:
optimizer = model_builder.build_optim(self.args, model, None)
logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, optimizer)
trainer.train(self.train_iter, self.args.train_steps)
def validate(self, step):
logger.info(f'Loading checkpoint from {self.args.validate_from}')
checkpoint = torch.load(self.args.validate_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
print(self.args)
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'valid', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=False)
trainer = build_trainer(self.args, self.device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def wait_and_validate(self):
time_step = 0
if self.args.test_all:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = self.validate(step=step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if i - max_step > 10:
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info(f'PPL {str(xent_lst)}')
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
self.test(step)
else:
while True:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if os.path.getsize(cp) <= 0:
time.sleep(60)
continue
if time_of_cp > time_step:
time_step = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
self.validate(step)
self.test(step)
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if time_of_cp > time_step:
continue
else:
time.sleep(300)
def test(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
# logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer.test(test_iter, step)
def gen_features_vector(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
# logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer.gen_features_vector(test_iter, step)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='transformer', type=str,
choices=['classifier', 'transformer', 'rnn', 'baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train', 'validate', 'test', 'vector'])
parser.add_argument("-data_name", default='chinese_summary', help='vy_text')
parser.add_argument("-bert_data_path", default='./data/bert_data/', help='./data/bert_data/')
parser.add_argument("-model_path", default='./models/models_check_points/')
parser.add_argument("-result_path", default='./results/')
parser.add_argument("-temp_dir", default='./temp/')
parser.add_argument("-bert_pretrained_model_path", default='./models/pytorch_pretrained_bert/bert_pretrain/')
parser.add_argument("-bert_config_path", default='./models/pytorch_pretrained_bert/bert_pretrain/bert_config.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=2048, type=int)
parser.add_argument("-heads", default=8, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optimizer", default='adam', type=str)
parser.add_argument("-lr", default=2e-3, type=float, help='learning rate')
parser.add_argument("-beta1", default=0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='noam', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5000, type=int)
parser.add_argument("-accum_count", default=2, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=50, type=int)
parser.add_argument("-train_steps", default=50000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument('-visible_gpus', default='0', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='./logs/project.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-test_from", default='./models/models_check_points/model_step_50000.pt')
parser.add_argument("-train_from", default='', help='./models/models_check_points/model_step_45000.pt')
parser.add_argument("-validate_from", default='../models/models_check_points/model_step_50000.pt')
parser.add_argument("-report_rouge", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-shuffle_data", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-vy_predict", type=str2bool, nargs='?', const=False, default=True)
_args = parser.parse_args()
gpu_ranks: str = str(_args.gpu_ranks)
_args.gpu_ranks = [int(i) for i in gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = _args.visible_gpus
init_logger(_args.log_file)
_device = "cpu" if _args.visible_gpus == '-1' else "cuda"
_device_id = 0 if _device == "cuda" else -1
runner = Running(args=_args, device_id=_device_id)
multi_runner = MultiRunning(args=_args, device_id=_device_id)
if _args.world_size > 1:
multi_runner.multi_card_run()
elif _args.mode == 'train':
runner.train()
elif _args.mode == 'validate':
runner.wait_and_validate()
elif _args.mode == 'test':
runner.test()
elif _args.mode == 'lead':
runner.baseline(cal_lead=True)
elif _args.mode == 'oracle':
runner.baseline(cal_oracle=True)
elif _args.mode == 'vector':
runner.gen_features_vector()
|
normal
|
{
"blob_id": "3adb50a6375a73f786369dd22712a657b66f758e",
"index": 8432,
"step-1": "<mask token>\n\n\nclass Running(object):\n <mask token>\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n <mask token>\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, 
map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n <mask token>\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k 
in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(target=self.error_listener,\n daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n rank, original_trace = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT)\n rank, original_trace = self.error_queue.get()\n msg = (\n '\\n\\n-- Tracebacks above this line can probably\\n be ignored --\\n\\n'\n )\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, 
self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(\n self.args, 'train', shuffle=True), self.args.batch_size, self.\n device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return 
stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path,\n 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, 
self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MultiRunning(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(target=self.error_listener,\n daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n rank, original_trace = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT)\n rank, original_trace = self.error_queue.get()\n msg = (\n '\\n\\n-- Tracebacks above this line can probably\\n be ignored --\\n\\n'\n )\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n 
load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(\n self.args, 'train', shuffle=True), self.args.batch_size, self.\n device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, 
model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path,\n 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n 
model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MultiRunning(object):\n\n def __init__(self, args, device_id):\n self.args = args\n self.device_id = device_id\n <mask token>\n <mask token>\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(target=self.error_listener,\n daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n rank, original_trace = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT)\n rank, original_trace = self.error_queue.get()\n msg = (\n '\\n\\n-- Tracebacks above this line can probably\\n be ignored --\\n\\n'\n )\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, 
cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(\n self.args, 'train', shuffle=True), self.args.batch_size, self.\n device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, 
shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path,\n 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, 
self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\nfrom __future__ import division\n\nimport os\nimport time\nimport glob\nimport torch\nimport random\nimport signal\nimport argparse\n\nfrom models.trainer import build_trainer\nfrom models import data_loader, model_builder\nfrom models.pytorch_pretrained_bert.modeling import BertConfig\n\nfrom utils import distributed\nfrom utils.logging import logger, init_logger\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\nclass MultiRunning(object):\n def __init__(self, args, device_id):\n self.args = args\n self.device_id = device_id\n\n def multi_card_run(self):\n \"\"\" Spawns 1 process per GPU \"\"\"\n init_logger()\n\n nb_gpu = self.args.world_size\n mp = torch.multiprocessing.get_context('spawn')\n\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n\n # Train with multiprocessing.\n process = []\n for i in range(nb_gpu):\n self.device_id = i\n process.append(mp.Process(target=self.multi_card_train, args=(self.args, self.device_id, error_queue),\n daemon=True))\n process[i].start()\n logger.info(\" Starting process pid: %d \" % process[i].pid)\n error_handler.add_child(process[i].pid)\n for p in process:\n p.join()\n\n def multi_card_train(self, error_queue):\n \"\"\" run process \"\"\"\n setattr(self.args, 'gpu_ranks', [int(i) for i in self.args.gpu_ranks])\n\n try:\n gpu_rank = distributed.multi_init(self.device_id, self.args.world_size, self.args.gpu_ranks)\n print('gpu_rank %d' % gpu_rank)\n if gpu_rank != self.args.gpu_ranks[self.device_id]:\n raise AssertionError(\"An error occurred in Distributed initialization\")\n runner = Running(self.args, self.device_id)\n runner.train()\n except KeyboardInterrupt:\n pass # killed by 
parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((self.args.gpu_ranks[self.device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval',\n 'rnn_size']\n\n self.device = \"cpu\" if self.args.visible_gpus == '-1' else \"cuda\"\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n\n 
init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=True)\n\n trainer = build_trainer(self.args, self.device_id, None, None)\n\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'train', shuffle=True),\n self.args.batch_size, self.device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=True)\n\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda storage, loc: storage)\n\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n\n valid_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 
'valid', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n\n config = 
BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n # logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n # logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-encoder\", default='transformer', type=str,\n choices=['classifier', 'transformer', 'rnn', 'baseline'])\n parser.add_argument(\"-mode\", default='train', type=str, choices=['train', 'validate', 'test', 'vector'])\n parser.add_argument(\"-data_name\", default='chinese_summary', help='vy_text')\n parser.add_argument(\"-bert_data_path\", default='./data/bert_data/', help='./data/bert_data/')\n parser.add_argument(\"-model_path\", 
default='./models/models_check_points/')\n parser.add_argument(\"-result_path\", default='./results/')\n parser.add_argument(\"-temp_dir\", default='./temp/')\n parser.add_argument(\"-bert_pretrained_model_path\", default='./models/pytorch_pretrained_bert/bert_pretrain/')\n parser.add_argument(\"-bert_config_path\", default='./models/pytorch_pretrained_bert/bert_pretrain/bert_config.json')\n\n parser.add_argument(\"-batch_size\", default=1000, type=int)\n\n parser.add_argument(\"-use_interval\", type=str2bool, nargs='?', const=True, default=True)\n parser.add_argument(\"-hidden_size\", default=128, type=int)\n parser.add_argument(\"-ff_size\", default=2048, type=int)\n parser.add_argument(\"-heads\", default=8, type=int)\n parser.add_argument(\"-inter_layers\", default=2, type=int)\n parser.add_argument(\"-rnn_size\", default=512, type=int)\n\n parser.add_argument(\"-param_init\", default=0, type=float)\n parser.add_argument(\"-param_init_glorot\", type=str2bool, nargs='?', const=True, default=True)\n parser.add_argument(\"-dropout\", default=0.1, type=float)\n parser.add_argument(\"-optimizer\", default='adam', type=str)\n parser.add_argument(\"-lr\", default=2e-3, type=float, help='learning rate')\n parser.add_argument(\"-beta1\", default=0.9, type=float)\n parser.add_argument(\"-beta2\", default=0.999, type=float)\n parser.add_argument(\"-decay_method\", default='noam', type=str)\n parser.add_argument(\"-warmup_steps\", default=8000, type=int)\n parser.add_argument(\"-max_grad_norm\", default=0, type=float)\n\n parser.add_argument(\"-save_checkpoint_steps\", default=5000, type=int)\n parser.add_argument(\"-accum_count\", default=2, type=int)\n parser.add_argument(\"-world_size\", default=1, type=int)\n parser.add_argument(\"-report_every\", default=50, type=int)\n parser.add_argument(\"-train_steps\", default=50000, type=int)\n parser.add_argument(\"-recall_eval\", type=str2bool, nargs='?', const=True, default=False)\n\n parser.add_argument('-visible_gpus', 
default='0', type=str)\n parser.add_argument('-gpu_ranks', default='0', type=str)\n parser.add_argument('-log_file', default='./logs/project.log')\n parser.add_argument('-dataset', default='')\n parser.add_argument('-seed', default=666, type=int)\n\n parser.add_argument(\"-test_all\", type=str2bool, nargs='?', const=True, default=False)\n parser.add_argument(\"-test_from\", default='./models/models_check_points/model_step_50000.pt')\n parser.add_argument(\"-train_from\", default='', help='./models/models_check_points/model_step_45000.pt')\n parser.add_argument(\"-validate_from\", default='../models/models_check_points/model_step_50000.pt')\n parser.add_argument(\"-report_rouge\", type=str2bool, nargs='?', const=True, default=False)\n parser.add_argument(\"-block_trigram\", type=str2bool, nargs='?', const=True, default=True)\n\n parser.add_argument(\"-shuffle_data\", type=str2bool, nargs='?', const=True, default=False)\n parser.add_argument(\"-vy_predict\", type=str2bool, nargs='?', const=False, default=True)\n\n _args = parser.parse_args()\n\n gpu_ranks: str = str(_args.gpu_ranks)\n _args.gpu_ranks = [int(i) for i in gpu_ranks.split(',')]\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = _args.visible_gpus\n\n init_logger(_args.log_file)\n _device = \"cpu\" if _args.visible_gpus == '-1' else \"cuda\"\n _device_id = 0 if _device == \"cuda\" else -1\n\n runner = Running(args=_args, device_id=_device_id)\n multi_runner = MultiRunning(args=_args, device_id=_device_id)\n if _args.world_size > 1:\n multi_runner.multi_card_run()\n elif _args.mode == 'train':\n runner.train()\n elif _args.mode == 'validate':\n runner.wait_and_validate()\n elif _args.mode == 'test':\n runner.test()\n elif _args.mode == 'lead':\n runner.baseline(cal_lead=True)\n elif _args.mode == 'oracle':\n runner.baseline(cal_oracle=True)\n elif _args.mode == 'vector':\n runner.gen_features_vector()\n",
"step-ids": [
7,
16,
17,
18,
24
]
}
|
[
7,
16,
17,
18,
24
] |
from skimage.measure import structural_similarity as ssim
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import pathlib
import warnings
from PIL import Image
from numpy import array
source_path = "/home/justin/Desktop/FeatureClustering/"
feature_length = len(os.listdir(source_path))
vector_data = []
recorded_lines = []
labels =[]
for folder in os.listdir(source_path):
for filename in os.listdir(source_path + folder +"/"):
if(filename != "---.png"):
linename = filename.split("-")
linename = linename[0]+"-"+linename[1]
if(linename not in recorded_lines):
vector = np.zeros(shape=(feature_length))
label = 0 if "G" in filename else 1
vector_data.append(vector)
labels.append(label)
recorded_lines.append(linename)
else:
index = recorded_lines.index(linename)
vector_data[index][int(folder)] += 1
#print(np.c_[recorded_lines,vector_data])
np.save("/home/justin/Desktop/vector_data.npy", vector_data)
np.save("/home/justin/Desktop/label_data.npy", labels)
|
normal
|
{
"blob_id": "ff1346060141ee3504aa5ee9de3a6ec196bcc216",
"index": 3918,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder + '/'):\n if filename != '---.png':\n linename = filename.split('-')\n linename = linename[0] + '-' + linename[1]\n if linename not in recorded_lines:\n vector = np.zeros(shape=feature_length)\n label = 0 if 'G' in filename else 1\n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\nnp.save('/home/justin/Desktop/vector_data.npy', vector_data)\nnp.save('/home/justin/Desktop/label_data.npy', labels)\n",
"step-3": "<mask token>\nsource_path = '/home/justin/Desktop/FeatureClustering/'\nfeature_length = len(os.listdir(source_path))\nvector_data = []\nrecorded_lines = []\nlabels = []\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder + '/'):\n if filename != '---.png':\n linename = filename.split('-')\n linename = linename[0] + '-' + linename[1]\n if linename not in recorded_lines:\n vector = np.zeros(shape=feature_length)\n label = 0 if 'G' in filename else 1\n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\nnp.save('/home/justin/Desktop/vector_data.npy', vector_data)\nnp.save('/home/justin/Desktop/label_data.npy', labels)\n",
"step-4": "from skimage.measure import structural_similarity as ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport pathlib\nimport warnings\nfrom PIL import Image\nfrom numpy import array\nsource_path = '/home/justin/Desktop/FeatureClustering/'\nfeature_length = len(os.listdir(source_path))\nvector_data = []\nrecorded_lines = []\nlabels = []\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder + '/'):\n if filename != '---.png':\n linename = filename.split('-')\n linename = linename[0] + '-' + linename[1]\n if linename not in recorded_lines:\n vector = np.zeros(shape=feature_length)\n label = 0 if 'G' in filename else 1\n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\nnp.save('/home/justin/Desktop/vector_data.npy', vector_data)\nnp.save('/home/justin/Desktop/label_data.npy', labels)\n",
"step-5": "from skimage.measure import structural_similarity as ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport pathlib\nimport warnings\nfrom PIL import Image\nfrom numpy import array\n\nsource_path = \"/home/justin/Desktop/FeatureClustering/\"\n\nfeature_length = len(os.listdir(source_path))\nvector_data = []\nrecorded_lines = []\nlabels =[]\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder +\"/\"):\n if(filename != \"---.png\"):\n linename = filename.split(\"-\")\n linename = linename[0]+\"-\"+linename[1]\n \n if(linename not in recorded_lines):\n vector = np.zeros(shape=(feature_length))\n label = 0 if \"G\" in filename else 1 \n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\n\n#print(np.c_[recorded_lines,vector_data])\nnp.save(\"/home/justin/Desktop/vector_data.npy\", vector_data)\nnp.save(\"/home/justin/Desktop/label_data.npy\", labels)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def generate_random_email_and_password():
"""
Function to generate random email id and password
"""
email = fake.email()
password_string = fake.password()
random_info = {'email': email, 'password': password_string}
return random_info
def generate_random_coupon_code(suffix=None):
"""
This function generates random coupon codes
"""
code = fake.password(length=8, special_chars=False, digits=True,
upper_case=True, lower_case=False)
if suffix:
code += suffix
return code
def create_coupon(data):
"""
This function is used to create a coupon using API call.
"""
return woo_request_helper().post_details('coupons', data,
expected_status_code=201)
<|reserved_special_token_0|>
def create_user(data):
"""
This function is used to create the user by user data using API call.
"""
return woo_request_helper().post_details(wc_endpoint='customers',
params=data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_random_email_and_password():
"""
Function to generate random email id and password
"""
email = fake.email()
password_string = fake.password()
random_info = {'email': email, 'password': password_string}
return random_info
def generate_random_coupon_code(suffix=None):
"""
This function generates random coupon codes
"""
code = fake.password(length=8, special_chars=False, digits=True,
upper_case=True, lower_case=False)
if suffix:
code += suffix
return code
def create_coupon(data):
"""
This function is used to create a coupon using API call.
"""
return woo_request_helper().post_details('coupons', data,
expected_status_code=201)
def list_all_products():
"""
This function returns the list of all products from the API.
"""
all_products = []
max_pages = 1000
page_num = 1
while page_num < max_pages:
param = {'per_page': 100, 'page': page_num}
rs_api = woo_request_helper().get_details(wc_endpoint='products',
params=param)
if rs_api:
page_num += 1
all_products.extend(rs_api)
else:
break
return all_products
def get_products_by_id(product_id):
"""
This function is used to get the product by product id using API call.
"""
rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.
format(product_id))
return rs_api
def update_random_product_price(product_id, data):
"""
This function is used to update the product by product id and data to update
using the API call.
"""
return woo_request_helper().put_details(wc_endpoint='products/{}'.
format(product_id), params=data)
def create_user(data):
"""
This function is used to create the user by user data using API call.
"""
return woo_request_helper().post_details(wc_endpoint='customers',
params=data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_random_email_and_password():
"""
Function to generate random email id and password
"""
email = fake.email()
password_string = fake.password()
random_info = {'email': email, 'password': password_string}
return random_info
def generate_random_coupon_code(suffix=None):
"""
This function generates random coupon codes
"""
code = fake.password(length=8, special_chars=False, digits=True,
upper_case=True, lower_case=False)
if suffix:
code += suffix
return code
def create_coupon(data):
"""
This function is used to create a coupon using API call.
"""
return woo_request_helper().post_details('coupons', data,
expected_status_code=201)
def list_all_products():
"""
This function returns the list of all products from the API.
"""
all_products = []
max_pages = 1000
page_num = 1
while page_num < max_pages:
param = {'per_page': 100, 'page': page_num}
rs_api = woo_request_helper().get_details(wc_endpoint='products',
params=param)
if rs_api:
page_num += 1
all_products.extend(rs_api)
else:
break
return all_products
def get_products_by_id(product_id):
"""
This function is used to get the product by product id using API call.
"""
rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.
format(product_id))
return rs_api
def update_random_product_price(product_id, data):
"""
This function is used to update the product by product id and data to update
using the API call.
"""
return woo_request_helper().put_details(wc_endpoint='products/{}'.
format(product_id), params=data)
def create_user(data):
"""
This function is used to create the user by user data using API call.
"""
return woo_request_helper().post_details(wc_endpoint='customers',
params=data)
def delete_user_by_id(user_id):
"""
This function is used to delete the user by user id using API call.
"""
return woo_request_helper().delete_details(wc_endpoint='customers/{}'.
format(user_id))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fake = Faker()
def generate_random_email_and_password():
"""
Function to generate random email id and password
"""
email = fake.email()
password_string = fake.password()
random_info = {'email': email, 'password': password_string}
return random_info
def generate_random_coupon_code(suffix=None):
"""
This function generates random coupon codes
"""
code = fake.password(length=8, special_chars=False, digits=True,
upper_case=True, lower_case=False)
if suffix:
code += suffix
return code
def create_coupon(data):
"""
This function is used to create a coupon using API call.
"""
return woo_request_helper().post_details('coupons', data,
expected_status_code=201)
def list_all_products():
"""
This function returns the list of all products from the API.
"""
all_products = []
max_pages = 1000
page_num = 1
while page_num < max_pages:
param = {'per_page': 100, 'page': page_num}
rs_api = woo_request_helper().get_details(wc_endpoint='products',
params=param)
if rs_api:
page_num += 1
all_products.extend(rs_api)
else:
break
return all_products
def get_products_by_id(product_id):
"""
This function is used to get the product by product id using API call.
"""
rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.
format(product_id))
return rs_api
def update_random_product_price(product_id, data):
"""
This function is used to update the product by product id and data to update
using the API call.
"""
return woo_request_helper().put_details(wc_endpoint='products/{}'.
format(product_id), params=data)
def create_user(data):
"""
This function is used to create the user by user data using API call.
"""
return woo_request_helper().post_details(wc_endpoint='customers',
params=data)
def delete_user_by_id(user_id):
"""
This function is used to delete the user by user id using API call.
"""
return woo_request_helper().delete_details(wc_endpoint='customers/{}'.
format(user_id))
<|reserved_special_token_1|>
"""
This is the common util file
"""
from faker import Faker
from pytest_practical.helper.api_helpers import woo_request_helper
fake = Faker()
def generate_random_email_and_password():
"""
Function to generate random email id and password
"""
email = fake.email()
password_string = fake.password()
random_info = {
'email': email,
'password': password_string
}
return random_info
def generate_random_coupon_code(suffix=None):
"""
This function generates random coupon codes
"""
code = fake.password(length=8, special_chars=False, digits=True, upper_case=True, lower_case=False)
if suffix:
code += suffix
return code
def create_coupon(data):
"""
This function is used to create a coupon using API call.
"""
return woo_request_helper().post_details('coupons', data, expected_status_code=201)
def list_all_products():
"""
This function returns the list of all products from the API.
"""
all_products = []
max_pages = 1000
page_num = 1
while page_num < max_pages:
param = {
'per_page': 100,
'page': page_num,
}
rs_api = woo_request_helper().get_details(wc_endpoint='products', params=param)
if rs_api:
page_num += 1
all_products.extend(rs_api)
else:
break
return all_products
def get_products_by_id(product_id):
"""
This function is used to get the product by product id using API call.
"""
rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.format(product_id))
return rs_api
def update_random_product_price(product_id, data):
"""
This function is used to update the product by product id and data to update
using the API call.
"""
return woo_request_helper().put_details(wc_endpoint='products/{}'.format(product_id), params=data)
def create_user(data):
"""
This function is used to create the user by user data using API call.
"""
return woo_request_helper().post_details(wc_endpoint='customers', params=data)
def delete_user_by_id(user_id):
"""
This function is used to delete the user by user id using API call.
"""
return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))
|
flexible
|
{
"blob_id": "0dab663847fdb4efa419882519616b7a89d0bbe8",
"index": 1716,
"step-1": "<mask token>\n\n\ndef generate_random_email_and_password():\n \"\"\"\n Function to generate random email id and password\n \"\"\"\n email = fake.email()\n password_string = fake.password()\n random_info = {'email': email, 'password': password_string}\n return random_info\n\n\ndef generate_random_coupon_code(suffix=None):\n \"\"\"\n This function generates random coupon codes\n \"\"\"\n code = fake.password(length=8, special_chars=False, digits=True,\n upper_case=True, lower_case=False)\n if suffix:\n code += suffix\n return code\n\n\ndef create_coupon(data):\n \"\"\"\n This function is used to create a coupon using API call.\n \"\"\"\n return woo_request_helper().post_details('coupons', data,\n expected_status_code=201)\n\n\n<mask token>\n\n\ndef create_user(data):\n \"\"\"\n This function is used to create the user by user data using API call.\n \"\"\"\n return woo_request_helper().post_details(wc_endpoint='customers',\n params=data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_random_email_and_password():\n \"\"\"\n Function to generate random email id and password\n \"\"\"\n email = fake.email()\n password_string = fake.password()\n random_info = {'email': email, 'password': password_string}\n return random_info\n\n\ndef generate_random_coupon_code(suffix=None):\n \"\"\"\n This function generates random coupon codes\n \"\"\"\n code = fake.password(length=8, special_chars=False, digits=True,\n upper_case=True, lower_case=False)\n if suffix:\n code += suffix\n return code\n\n\ndef create_coupon(data):\n \"\"\"\n This function is used to create a coupon using API call.\n \"\"\"\n return woo_request_helper().post_details('coupons', data,\n expected_status_code=201)\n\n\ndef list_all_products():\n \"\"\"\n This function returns the list of all products from the API.\n \"\"\"\n all_products = []\n max_pages = 1000\n page_num = 1\n while page_num < max_pages:\n param = {'per_page': 100, 'page': page_num}\n rs_api = woo_request_helper().get_details(wc_endpoint='products',\n params=param)\n if rs_api:\n page_num += 1\n all_products.extend(rs_api)\n else:\n break\n return all_products\n\n\ndef get_products_by_id(product_id):\n \"\"\"\n This function is used to get the product by product id using API call.\n \"\"\"\n rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.\n format(product_id))\n return rs_api\n\n\ndef update_random_product_price(product_id, data):\n \"\"\"\n This function is used to update the product by product id and data to update\n using the API call.\n \"\"\"\n return woo_request_helper().put_details(wc_endpoint='products/{}'.\n format(product_id), params=data)\n\n\ndef create_user(data):\n \"\"\"\n This function is used to create the user by user data using API call.\n \"\"\"\n return woo_request_helper().post_details(wc_endpoint='customers',\n params=data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generate_random_email_and_password():\n \"\"\"\n Function to generate random email id and password\n \"\"\"\n email = fake.email()\n password_string = fake.password()\n random_info = {'email': email, 'password': password_string}\n return random_info\n\n\ndef generate_random_coupon_code(suffix=None):\n \"\"\"\n This function generates random coupon codes\n \"\"\"\n code = fake.password(length=8, special_chars=False, digits=True,\n upper_case=True, lower_case=False)\n if suffix:\n code += suffix\n return code\n\n\ndef create_coupon(data):\n \"\"\"\n This function is used to create a coupon using API call.\n \"\"\"\n return woo_request_helper().post_details('coupons', data,\n expected_status_code=201)\n\n\ndef list_all_products():\n \"\"\"\n This function returns the list of all products from the API.\n \"\"\"\n all_products = []\n max_pages = 1000\n page_num = 1\n while page_num < max_pages:\n param = {'per_page': 100, 'page': page_num}\n rs_api = woo_request_helper().get_details(wc_endpoint='products',\n params=param)\n if rs_api:\n page_num += 1\n all_products.extend(rs_api)\n else:\n break\n return all_products\n\n\ndef get_products_by_id(product_id):\n \"\"\"\n This function is used to get the product by product id using API call.\n \"\"\"\n rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.\n format(product_id))\n return rs_api\n\n\ndef update_random_product_price(product_id, data):\n \"\"\"\n This function is used to update the product by product id and data to update\n using the API call.\n \"\"\"\n return woo_request_helper().put_details(wc_endpoint='products/{}'.\n format(product_id), params=data)\n\n\ndef create_user(data):\n \"\"\"\n This function is used to create the user by user data using API call.\n \"\"\"\n return woo_request_helper().post_details(wc_endpoint='customers',\n params=data)\n\n\ndef delete_user_by_id(user_id):\n \"\"\"\n This function is used to delete the user by user id using API 
call.\n \"\"\"\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.\n format(user_id))\n",
"step-4": "<mask token>\nfake = Faker()\n\n\ndef generate_random_email_and_password():\n \"\"\"\n Function to generate random email id and password\n \"\"\"\n email = fake.email()\n password_string = fake.password()\n random_info = {'email': email, 'password': password_string}\n return random_info\n\n\ndef generate_random_coupon_code(suffix=None):\n \"\"\"\n This function generates random coupon codes\n \"\"\"\n code = fake.password(length=8, special_chars=False, digits=True,\n upper_case=True, lower_case=False)\n if suffix:\n code += suffix\n return code\n\n\ndef create_coupon(data):\n \"\"\"\n This function is used to create a coupon using API call.\n \"\"\"\n return woo_request_helper().post_details('coupons', data,\n expected_status_code=201)\n\n\ndef list_all_products():\n \"\"\"\n This function returns the list of all products from the API.\n \"\"\"\n all_products = []\n max_pages = 1000\n page_num = 1\n while page_num < max_pages:\n param = {'per_page': 100, 'page': page_num}\n rs_api = woo_request_helper().get_details(wc_endpoint='products',\n params=param)\n if rs_api:\n page_num += 1\n all_products.extend(rs_api)\n else:\n break\n return all_products\n\n\ndef get_products_by_id(product_id):\n \"\"\"\n This function is used to get the product by product id using API call.\n \"\"\"\n rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.\n format(product_id))\n return rs_api\n\n\ndef update_random_product_price(product_id, data):\n \"\"\"\n This function is used to update the product by product id and data to update\n using the API call.\n \"\"\"\n return woo_request_helper().put_details(wc_endpoint='products/{}'.\n format(product_id), params=data)\n\n\ndef create_user(data):\n \"\"\"\n This function is used to create the user by user data using API call.\n \"\"\"\n return woo_request_helper().post_details(wc_endpoint='customers',\n params=data)\n\n\ndef delete_user_by_id(user_id):\n \"\"\"\n This function is used to delete the user by user id 
using API call.\n \"\"\"\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.\n format(user_id))\n",
"step-5": "\"\"\"\nThis is the common util file\n\"\"\"\nfrom faker import Faker\nfrom pytest_practical.helper.api_helpers import woo_request_helper\n\nfake = Faker()\n\n\ndef generate_random_email_and_password():\n \"\"\"\n Function to generate random email id and password\n \"\"\"\n email = fake.email()\n\n password_string = fake.password()\n\n random_info = {\n 'email': email,\n 'password': password_string\n }\n return random_info\n\n\ndef generate_random_coupon_code(suffix=None):\n \"\"\"\n This function generates random coupon codes\n \"\"\"\n code = fake.password(length=8, special_chars=False, digits=True, upper_case=True, lower_case=False)\n if suffix:\n code += suffix\n\n return code\n\n\ndef create_coupon(data):\n \"\"\"\n This function is used to create a coupon using API call.\n \"\"\"\n return woo_request_helper().post_details('coupons', data, expected_status_code=201)\n\n\ndef list_all_products():\n \"\"\"\n This function returns the list of all products from the API.\n \"\"\"\n all_products = []\n max_pages = 1000\n page_num = 1\n while page_num < max_pages:\n\n param = {\n 'per_page': 100,\n 'page': page_num,\n }\n rs_api = woo_request_helper().get_details(wc_endpoint='products', params=param)\n\n if rs_api:\n page_num += 1\n all_products.extend(rs_api)\n else:\n break\n\n return all_products\n\n\ndef get_products_by_id(product_id):\n \"\"\"\n This function is used to get the product by product id using API call.\n \"\"\"\n rs_api = woo_request_helper().get_details(wc_endpoint='products/{}'.format(product_id))\n return rs_api\n\n\ndef update_random_product_price(product_id, data):\n \"\"\"\n This function is used to update the product by product id and data to update\n using the API call.\n \"\"\"\n return woo_request_helper().put_details(wc_endpoint='products/{}'.format(product_id), params=data)\n\n\ndef create_user(data):\n \"\"\"\n This function is used to create the user by user data using API call.\n \"\"\"\n return 
woo_request_helper().post_details(wc_endpoint='customers', params=data)\n\n\ndef delete_user_by_id(user_id):\n \"\"\"\n This function is used to delete the user by user id using API call.\n \"\"\"\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))\n",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app.config.from_object(config)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config.from_object(config)
queue = Queue()
mq = RabbitMQ(app, queue)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from flask import Flask
import config
from flask_rabbitmq import Queue, RabbitMQ
app = Flask(__name__)
app.config.from_object(config)
queue = Queue()
mq = RabbitMQ(app, queue)
from app import demo
<|reserved_special_token_1|>
#encoding:utf-8
from flask import Flask
import config
from flask_rabbitmq import Queue, RabbitMQ
app = Flask(__name__)
app.config.from_object(config)
queue = Queue()
mq = RabbitMQ(app, queue)
from app import demo
|
flexible
|
{
"blob_id": "ccf9c389a65d1420e87deec2100e37bccdcb5539",
"index": 6323,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config.from_object(config)\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object(config)\nqueue = Queue()\nmq = RabbitMQ(app, queue)\n<mask token>\n",
"step-4": "from flask import Flask\nimport config\nfrom flask_rabbitmq import Queue, RabbitMQ\napp = Flask(__name__)\napp.config.from_object(config)\nqueue = Queue()\nmq = RabbitMQ(app, queue)\nfrom app import demo\n",
"step-5": "#encoding:utf-8\nfrom flask import Flask\nimport config\nfrom flask_rabbitmq import Queue, RabbitMQ\n\napp = Flask(__name__)\napp.config.from_object(config)\n\nqueue = Queue()\nmq = RabbitMQ(app, queue)\n\nfrom app import demo\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getDepths(imgs, img_names, intersectionCoords, stakeValidity,
templateIntersections, upperBorder, tensors, actualTensors,
intersectionDist, blobDistTemplate, debug, debug_directory, image_dates,
imageSummary):
"""
Function to calculate the change in snow depth for each stake using the tensor
from the specified template
Keyword arguments:
imgs -- list of input images
img_names -- list of corresponding image file names
intersectionCoords -- list containing intersection coordinates for input images
stakeValidity -- list indicating which stakes in input images are valid
templateIntersections -- list containing intersection coordinates for template
upperBorder -- upper crop parameter
tensors -- tensors from template image
actualTensors -- tensors calculated for input images
intersectionDist -- list containing distances from blobs to intersection points
for input images
blobDistTemplate -- list containing blob to intersection point distances from
template
debug -- bool flag indicating whether output images should be saved
debug_directory -- directory where output images should be written
image_dates -- list containing dates of images extracted from EXIF data
imageSummary -- dictionary containing information about each run
"""
median_depths = list()
median_depths_est = list()
depth_output = {}
num_images = len(imgs)
depths = dict()
dest = str(debug_directory) + 'snow-depths.xlsx'
workbook = xlsxwriter.Workbook(dest)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, len(tensors) + 3, 25)
cell_format = workbook.add_format()
cell_format.set_align('center')
worksheet.write(0, 0, 'Image', cell_format)
worksheet.write(0, 1, 'Date', cell_format)
worksheet.write(0, len(tensors) + 2, 'Median Depth (mm)', cell_format)
worksheet.write(0, len(tensors) + 3, 'Median Estimate (mm)', cell_format)
for i, j in enumerate(tensors):
worksheet.write(0, i + 2, 'Stake %s' % str(i), cell_format)
row = 1
col = 0
iterator = 0
for img_ in tqdm.tqdm(imgs):
if debug:
img_overlay = img_.copy()
depths_stake = list()
estimate_stake = list()
img_name = img_names[iterator]
col = 0
worksheet.write(row, col, img_name, cell_format)
if isinstance(image_dates[iterator], datetime.datetime):
worksheet.write(row, col + 1, image_dates[iterator].strftime(
'%x %X'), cell_format)
col = 2
coords_stake = intersectionCoords[img_name]
intersection_dist_stake = intersectionDist[img_name]
for i, stake in enumerate(coords_stake):
if stakeValidity[img_name][i] and stake['average'][1] != False:
if debug:
cv2.circle(img_overlay, (int(templateIntersections[i][0
]), int(templateIntersections[i][1]) - upperBorder),
5, (255, 0, 0), 3)
cv2.circle(img_overlay, (int(stake['average'][0]), int(
stake['average'][1])), 5, (0, 255, 0), 2)
tensor = actualTensors[img_name][i] if actualTensors[img_name][
i] != True else tensors[i]
depth_change = (templateIntersections[i][1] - upperBorder -
stake['average'][1]) * tensor
distances_stake = list()
for w, x in enumerate(intersection_dist_stake[i]):
if x != False:
distances_stake.append((abs(blobDistTemplate[i][w]) -
abs(x)) * tensor)
distance_estimate = statistics.median(distances_stake) if len(
distances_stake) > 0 else 0
worksheet.write(row, col + i, '%.2f (%.2f)' % (depth_change,
distance_estimate), cell_format)
depths_stake.append(depth_change)
estimate_stake.append(distance_estimate)
else:
if stakeValidity[img_name][i]:
worksheet.write(row, col + i, 'Not Found', cell_format)
else:
worksheet.write(row, col + i, 'Invalid Stake', cell_format)
depths_stake.append(False)
estimate_stake.append(False)
if debug:
cv2.imwrite(debug_directory + img_name, img_overlay)
depths[img_name] = depths_stake
valid_depths = [x for x in depths_stake if x != False]
valid_estimates = [x for x in estimate_stake if x != False]
if len(valid_depths) > 0:
median = statistics.median(valid_depths)
median_est = statistics.median(valid_estimates)
else:
median = False
median_est = False
median_depths.append(median)
median_depths_est.append(median_est)
if median != False and median > 0:
worksheet.write(row, len(tensors) + 2, '%.2f' % median, cell_format
)
worksheet.write(row, len(tensors) + 3, '%.2f' % median_est,
cell_format)
elif median != False:
worksheet.write(row, len(tensors) + 2, '0.0', cell_format)
worksheet.write(row, len(tensors) + 3, '0.0', cell_format)
else:
worksheet.write(row, len(tensors) + 2, 'n/a', cell_format)
worksheet.write(row, len(tensors) + 3, 'n/a', cell_format)
row += 1
iterator += 1
imageSummary[img_name][' '] = ''
imageSummary[img_name]['Stake (Depth Calculation)'
] = 'Depth (mm) Estimate (mm)'
for e, depth in enumerate(depths_stake):
if isinstance(depth, float):
imageSummary[img_name][' %d ' % (e + 1)
] = '%0.2f %0.2f ' % (depth,
estimate_stake[e])
else:
imageSummary[img_name][' %d ' % (e + 1)
] = '%s %s ' % ('n/a', 'n/a')
workbook.close()
filterSet = zip(median_depths, median_depths_est, image_dates)
filterSet = [(x, y, z) for x, y, z in filterSet if x != False]
median_depths, median_depths_est, image_dates = zip(*filterSet)
median_depths = np.asarray(median_depths).clip(0)
median_depths_est = np.asarray(median_depths_est).clip(0)
fig, ax = plt.subplots(1)
plt.plot(image_dates, median_depths)
plt.plot(image_dates, median_depths_est)
plt.gcf().autofmt_xdate()
plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')
ax.set_xlabel('Date')
ax.set_ylabel('Snow Depth (mm)')
plt.xticks(rotation=75)
plt.tight_layout()
plt.savefig(debug_directory + 'depth-graph.jpg')
plt.close()
return depths, imageSummary
<|reserved_special_token_1|>
import cv2
import xlsxwriter
import statistics
from matplotlib import pyplot as plt
import math
import tqdm
import numpy as np
import datetime
def getDepths(imgs, img_names, intersectionCoords, stakeValidity,
    templateIntersections, upperBorder, tensors, actualTensors,
    intersectionDist, blobDistTemplate, debug, debug_directory, image_dates,
    imageSummary):
    """
    Function to calculate the change in snow depth for each stake using the tensor
    from the specified template
    Keyword arguments:
    imgs -- list of input images
    img_names -- list of corresponding image file names
    intersectionCoords -- list containing intersection coordinates for input images
    stakeValidity -- list indicating which stakes in input images are valid
    templateIntersections -- list containing intersection coordinates for template
    upperBorder -- upper crop parameter
    tensors -- tensors from template image
    actualTensors -- tensors calculated for input images
    intersectionDist -- list containing distances from blobs to intersection points
    for input images
    blobDistTemplate -- list containing blob to intersection point distances from
    template
    debug -- bool flag indicating whether output images should be saved
    debug_directory -- directory where output images should be written
    image_dates -- list containing dates of images extracted from EXIF data
    imageSummary -- dictionary containing information about each run
    Returns:
    depths -- dict mapping each image name to a list of per-stake depth
    changes in mm (False entries mark stakes with no measurement)
    imageSummary -- the input summary dict updated with per-stake results
    Side effects:
    writes 'snow-depths.xlsx' and 'depth-graph.jpg' into debug_directory,
    plus annotated copies of the input images when debug is True
    """
    # per-image medians of the direct depths and of the blob-based estimates
    median_depths = list()
    median_depths_est = list()
    # NOTE(review): depth_output and num_images are assigned but never used
    depth_output = {}
    num_images = len(imgs)
    # maps image name -> list of per-stake depth changes (False = no value)
    depths = dict()
    # spreadsheet report: one row per image, one column per stake
    dest = str(debug_directory) + 'snow-depths.xlsx'
    workbook = xlsxwriter.Workbook(dest)
    worksheet = workbook.add_worksheet()
    worksheet.set_column(0, len(tensors) + 3, 25)
    # centred format applied to every cell
    cell_format = workbook.add_format()
    cell_format.set_align('center')
    # header row
    worksheet.write(0, 0, 'Image', cell_format)
    worksheet.write(0, 1, 'Date', cell_format)
    worksheet.write(0, len(tensors) + 2, 'Median Depth (mm)', cell_format)
    worksheet.write(0, len(tensors) + 3, 'Median Estimate (mm)', cell_format)
    for i, j in enumerate(tensors):
        worksheet.write(0, i + 2, 'Stake %s' % str(i), cell_format)
    # first data row sits below the header
    row = 1
    col = 0
    iterator = 0
    for img_ in tqdm.tqdm(imgs):
        # draw debug markers on a copy so the source image stays untouched
        if debug:
            img_overlay = img_.copy()
        # per-stake results for the current image
        depths_stake = list()
        estimate_stake = list()
        img_name = img_names[iterator]
        col = 0
        worksheet.write(row, col, img_name, cell_format)
        # only write the date cell when EXIF parsing yielded a datetime
        if isinstance(image_dates[iterator], datetime.datetime):
            worksheet.write(row, col + 1, image_dates[iterator].strftime(
                '%x %X'), cell_format)
        col = 2
        # intersection coordinates and blob distances for this image
        coords_stake = intersectionCoords[img_name]
        intersection_dist_stake = intersectionDist[img_name]
        for i, stake in enumerate(coords_stake):
            # usable only if the stake is valid and an intersection was found
            if stakeValidity[img_name][i] and stake['average'][1] != False:
                # blue circle = template intersection, green = this image's
                if debug:
                    cv2.circle(img_overlay, (int(templateIntersections[i][0
                        ]), int(templateIntersections[i][1]) - upperBorder),
                        5, (255, 0, 0), 3)
                    cv2.circle(img_overlay, (int(stake['average'][0]), int(
                        stake['average'][1])), 5, (0, 255, 0), 2)
                # prefer the per-image tensor; fall back to the template's
                # when the per-image value is flagged as True
                tensor = actualTensors[img_name][i] if actualTensors[img_name][
                    i] != True else tensors[i]
                # vertical displacement of the intersection, scaled to mm
                depth_change = (templateIntersections[i][1] - upperBorder -
                    stake['average'][1]) * tensor
                # secondary estimate from blob-to-intersection distances
                distances_stake = list()
                for w, x in enumerate(intersection_dist_stake[i]):
                    if x != False:
                        distances_stake.append((abs(blobDistTemplate[i][w]) -
                            abs(x)) * tensor)
                distance_estimate = statistics.median(distances_stake) if len(
                    distances_stake) > 0 else 0
                worksheet.write(row, col + i, '%.2f (%.2f)' % (depth_change,
                    distance_estimate), cell_format)
                depths_stake.append(depth_change)
                estimate_stake.append(distance_estimate)
            else:
                # distinguish "valid but not found" from "invalid stake"
                if stakeValidity[img_name][i]:
                    worksheet.write(row, col + i, 'Not Found', cell_format)
                else:
                    worksheet.write(row, col + i, 'Invalid Stake', cell_format)
                depths_stake.append(False)
                estimate_stake.append(False)
        # save the annotated image when debugging
        if debug:
            cv2.imwrite(debug_directory + img_name, img_overlay)
        depths[img_name] = depths_stake
        # medians over the stakes that produced measurements
        valid_depths = [x for x in depths_stake if x != False]
        valid_estimates = [x for x in estimate_stake if x != False]
        if len(valid_depths) > 0:
            median = statistics.median(valid_depths)
            median_est = statistics.median(valid_estimates)
        else:
            median = False
            median_est = False
        median_depths.append(median)
        median_depths_est.append(median_est)
        # report medians; negative medians are clamped to 0.0 in the sheet
        if median != False and median > 0:
            worksheet.write(row, len(tensors) + 2, '%.2f' % median, cell_format
                )
            worksheet.write(row, len(tensors) + 3, '%.2f' % median_est,
                cell_format)
        elif median != False:
            worksheet.write(row, len(tensors) + 2, '0.0', cell_format)
            worksheet.write(row, len(tensors) + 3, '0.0', cell_format)
        else:
            worksheet.write(row, len(tensors) + 2, 'n/a', cell_format)
            worksheet.write(row, len(tensors) + 3, 'n/a', cell_format)
        row += 1
        iterator += 1
        # append this image's per-stake results to the run summary
        imageSummary[img_name][' '] = ''
        imageSummary[img_name]['Stake (Depth Calculation)'
            ] = 'Depth (mm) Estimate (mm)'
        for e, depth in enumerate(depths_stake):
            if isinstance(depth, float):
                imageSummary[img_name][' %d ' % (e + 1)
                    ] = '%0.2f %0.2f ' % (depth,
                    estimate_stake[e])
            else:
                imageSummary[img_name][' %d ' % (e + 1)
                    ] = '%s %s ' % ('n/a', 'n/a')
    workbook.close()
    # drop images without a median before plotting
    # NOTE(review): zip(*filterSet) raises ValueError if no image produced a
    # median -- confirm upstream guarantees at least one valid image
    filterSet = zip(median_depths, median_depths_est, image_dates)
    filterSet = [(x, y, z) for x, y, z in filterSet if x != False]
    median_depths, median_depths_est, image_dates = zip(*filterSet)
    median_depths = np.asarray(median_depths).clip(0)
    median_depths_est = np.asarray(median_depths_est).clip(0)
    # plot median depth and median estimate against capture date
    fig, ax = plt.subplots(1)
    plt.plot(image_dates, median_depths)
    plt.plot(image_dates, median_depths_est)
    plt.gcf().autofmt_xdate()
    plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')
    ax.set_xlabel('Date')
    ax.set_ylabel('Snow Depth (mm)')
    plt.xticks(rotation=75)
    plt.tight_layout()
    plt.savefig(debug_directory + 'depth-graph.jpg')
    plt.close()
    return depths, imageSummary
<|reserved_special_token_1|>
# import necessary modules
import cv2
import xlsxwriter
import statistics
from matplotlib import pyplot as plt
import math
import tqdm
import numpy as np
import datetime
def getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,
    upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,
    image_dates, imageSummary):
    """
    Function to calculate the change in snow depth for each stake using the tensor
    from the specified template
    Keyword arguments:
    imgs -- list of input images
    img_names -- list of corresponding image file names
    intersectionCoords -- list containing intersection coordinates for input images
    stakeValidity -- list indicating which stakes in input images are valid
    templateIntersections -- list containing intersection coordinates for template
    upperBorder -- upper crop parameter
    tensors -- tensors from template image
    actualTensors -- tensors calculated for input images
    intersectionDist -- list containing distances from blobs to intersection points
    for input images
    blobDistTemplate -- list containing blob to intersection point distances from
    template
    debug -- bool flag indicating whether output images should be saved
    debug_directory -- directory where output images should be written
    image_dates -- list containing dates of images extracted from EXIF data
    imageSummary -- dictionary containing information about each run
    Returns:
    depths -- dict mapping each image name to a list of per-stake depth
    changes in mm (False entries mark stakes with no measurement)
    imageSummary -- the input summary dict updated with per-stake results
    Side effects:
    writes 'snow-depths.xlsx' and 'depth-graph.jpg' into debug_directory,
    plus annotated copies of the input images when debug is True
    """

    # list containing median depths for each image
    median_depths = list()
    median_depths_est = list()

    # contains output data for JSON file
    # NOTE(review): depth_output and num_images are assigned but never used
    depth_output = {}

    # num of images
    num_images = len(imgs)

    # create output dictionary for images
    depths = dict()

    # create excel workbook and add worksheet
    dest = str(debug_directory) + 'snow-depths.xlsx'
    workbook = xlsxwriter.Workbook(dest)
    worksheet = workbook.add_worksheet()
    worksheet.set_column(0, len(tensors) + 3, 25)

    # create format
    cell_format = workbook.add_format()
    cell_format.set_align('center')

    # add titles
    worksheet.write(0, 0, "Image", cell_format)
    worksheet.write(0, 1, "Date", cell_format)
    worksheet.write(0, len(tensors) + 2, "Median Depth (mm)", cell_format)
    worksheet.write(0, len(tensors) + 3, "Median Estimate (mm)", cell_format)
    for i, j in enumerate(tensors):
        worksheet.write(0, i+2, ("Stake %s" % str(i)), cell_format)

    # start from the first cell
    row = 1
    col = 0

    # image iterator
    iterator = 0

    # iterate through images
    for img_ in tqdm.tqdm(imgs):
        # create an image to overlay points on if debugging
        if(debug):
            img_overlay = img_.copy()

        # list to hold calculated depths
        depths_stake = list()
        estimate_stake = list()

        # get image name
        img_name = img_names[iterator]

        # reset column
        col = 0

        # write to excel file (date only when EXIF parsing gave a datetime)
        worksheet.write(row, col, img_name, cell_format)
        if isinstance(image_dates[iterator], datetime.datetime):
            worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)
        col = 2

        # get intersection coordiantes
        coords_stake = intersectionCoords[img_name]

        # get blob intersection distances
        intersection_dist_stake = intersectionDist[img_name]

        # iterate through stakes in image
        for i, stake in enumerate(coords_stake):
            # if stake is valid and intersection point was found
            if stakeValidity[img_name][i] and stake["average"][1] != False:
                # add reference circles to output image if debugging
                # shows intersection point of image with reference to template
                if(debug):
                    cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)
                    cv2.circle(img_overlay, (int(stake["average"][0]), int(stake["average"][1])), 5, (0,255,0), 2)

                # calculate change in snow depth in mm
                # per-image tensor preferred; template tensor used as the
                # fallback when the per-image value is flagged as True
                tensor = actualTensors[img_name][i] if actualTensors[img_name][i] != True else tensors[i]
                depth_change = ((templateIntersections[i][1] - upperBorder) - stake["average"][1]) * tensor

                # calculate change in snow depth using blob distances
                distances_stake = list()
                for w, x in enumerate(intersection_dist_stake[i]):
                    if x != False:
                        distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)
                distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0

                # write to excel file
                worksheet.write(row, col + i, "%.2f (%.2f)" % (depth_change, distance_estimate), cell_format)

                # add to list
                depths_stake.append(depth_change)
                estimate_stake.append(distance_estimate)

            # if stake wasn't valid or intersection point not found
            else:
                # if stake was valid
                if stakeValidity[img_name][i]:
                    worksheet.write(row, col + i, "Not Found", cell_format)
                # invalid stake
                else:
                    worksheet.write(row, col + i, "Invalid Stake", cell_format)

                # append false to array
                depths_stake.append(False)
                estimate_stake.append(False)

        # output debug image
        if(debug):
            cv2.imwrite(debug_directory + img_name, img_overlay)

        # add list to dictionary
        depths[img_name] = depths_stake

        # determine median depth over the stakes that produced measurements
        valid_depths = [x for x in depths_stake if x != False]
        valid_estimates = [x for x in estimate_stake if x != False]

        if(len(valid_depths) > 0):
            median = statistics.median(valid_depths)
            median_est = statistics.median(valid_estimates)
        else:
            median = False
            median_est = False

        # add to median depth list
        median_depths.append(median)
        median_depths_est.append(median_est)

        # write median to excel file (negative medians clamped to 0.0)
        if median != False and median > 0:
            worksheet.write(row, len(tensors) + 2, "%.2f" % median, cell_format)
            worksheet.write(row, len(tensors) + 3, "%.2f" % median_est, cell_format)
        elif median != False:
            worksheet.write(row, len(tensors) + 2, "0.0", cell_format)
            worksheet.write(row, len(tensors) + 3, "0.0", cell_format)
        else:
            worksheet.write(row, len(tensors) + 2, "n/a", cell_format)
            worksheet.write(row, len(tensors) + 3, "n/a", cell_format)

        # increment row
        row += 1

        # increment iterator
        iterator += 1

        # update image summary
        imageSummary[img_name][" "] = ""
        imageSummary[img_name]["Stake (Depth Calculation)"] = "Depth (mm) Estimate (mm)"
        for e, depth in enumerate(depths_stake):
            if isinstance(depth, float):
                imageSummary[img_name][" %d " % (e+1)] = "%0.2f %0.2f " % \
                    (depth, estimate_stake[e])
            else:
                imageSummary[img_name][" %d " % (e+1)] = "%s %s " % \
                    ("n/a", "n/a")

    # close workbook
    workbook.close()

    # remove negative values
    # NOTE(review): zip(*filterSet) raises ValueError if no image produced a
    # median -- confirm upstream guarantees at least one valid image
    filterSet = zip(median_depths, median_depths_est, image_dates)
    filterSet = [(x, y, z) for x, y, z in filterSet if x != False]
    median_depths, median_depths_est, image_dates = zip(*filterSet)
    median_depths = np.asarray(median_depths).clip(0)
    median_depths_est = np.asarray(median_depths_est).clip(0)

    # generate plot of median depth and median estimate against date
    fig,ax = plt.subplots(1)
    plt.plot(image_dates, median_depths)
    plt.plot(image_dates, median_depths_est)
    plt.gcf().autofmt_xdate()
    plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')
    ax.set_xlabel("Date")
    ax.set_ylabel("Snow Depth (mm)")
    plt.xticks(rotation=75)
    plt.tight_layout()

    # save figure
    plt.savefig(debug_directory + "depth-graph.jpg")
    plt.close()

    # return dictionary containing snow depth changes
    return depths, imageSummary
|
flexible
|
{
"blob_id": "24a538dcc885b37eb0147a1ee089189f11b20f8a",
"index": 7945,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getDepths(imgs, img_names, intersectionCoords, stakeValidity,\n templateIntersections, upperBorder, tensors, actualTensors,\n intersectionDist, blobDistTemplate, debug, debug_directory, image_dates,\n imageSummary):\n \"\"\"\n Function to calculate the change in snow depth for each stake using the tensor\n from the specified template\n\n Keyword arguments:\n imgs -- list of input images\n img_names -- list of corresponding image file names\n intersectionCoords -- list containing intersection coordinates for input images\n stakeValidity -- list indicating which stakes in input images are valid\n templateIntersections -- list containing intersection coordinates for template\n upperBorder -- upper crop parameter\n tensors -- tensors from template image\n actualTensors -- tensors calculated for input images\n intersectionDist -- list containing distances from blobs to intersection points\n for input images\n blobDistTemplate -- list containing blob to intersection point distances from\n template\n debug -- bool flag indicating whether output images should be saved\n debug_directory -- directory where output images should be written\n image_dates -- list containing dates of images extracted from EXIF data\n imageSummary -- dictionary containing information about each run\n \"\"\"\n median_depths = list()\n median_depths_est = list()\n depth_output = {}\n num_images = len(imgs)\n depths = dict()\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n worksheet.write(0, 0, 'Image', cell_format)\n worksheet.write(0, 1, 'Date', cell_format)\n worksheet.write(0, len(tensors) + 2, 'Median Depth (mm)', cell_format)\n worksheet.write(0, len(tensors) + 3, 'Median Estimate (mm)', cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i + 2, 
'Stake %s' % str(i), cell_format)\n row = 1\n col = 0\n iterator = 0\n for img_ in tqdm.tqdm(imgs):\n if debug:\n img_overlay = img_.copy()\n depths_stake = list()\n estimate_stake = list()\n img_name = img_names[iterator]\n col = 0\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime(\n '%x %X'), cell_format)\n col = 2\n coords_stake = intersectionCoords[img_name]\n intersection_dist_stake = intersectionDist[img_name]\n for i, stake in enumerate(coords_stake):\n if stakeValidity[img_name][i] and stake['average'][1] != False:\n if debug:\n cv2.circle(img_overlay, (int(templateIntersections[i][0\n ]), int(templateIntersections[i][1]) - upperBorder),\n 5, (255, 0, 0), 3)\n cv2.circle(img_overlay, (int(stake['average'][0]), int(\n stake['average'][1])), 5, (0, 255, 0), 2)\n tensor = actualTensors[img_name][i] if actualTensors[img_name][\n i] != True else tensors[i]\n depth_change = (templateIntersections[i][1] - upperBorder -\n stake['average'][1]) * tensor\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) -\n abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(\n distances_stake) > 0 else 0\n worksheet.write(row, col + i, '%.2f (%.2f)' % (depth_change,\n distance_estimate), cell_format)\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n else:\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, 'Not Found', cell_format)\n else:\n worksheet.write(row, col + i, 'Invalid Stake', cell_format)\n depths_stake.append(False)\n estimate_stake.append(False)\n if debug:\n cv2.imwrite(debug_directory + img_name, img_overlay)\n depths[img_name] = depths_stake\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n if 
len(valid_depths) > 0:\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n median_depths.append(median)\n median_depths_est.append(median_est)\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, '%.2f' % median, cell_format\n )\n worksheet.write(row, len(tensors) + 3, '%.2f' % median_est,\n cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, '0.0', cell_format)\n worksheet.write(row, len(tensors) + 3, '0.0', cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, 'n/a', cell_format)\n worksheet.write(row, len(tensors) + 3, 'n/a', cell_format)\n row += 1\n iterator += 1\n imageSummary[img_name][' '] = ''\n imageSummary[img_name]['Stake (Depth Calculation)'\n ] = 'Depth (mm) Estimate (mm)'\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%0.2f %0.2f ' % (depth,\n estimate_stake[e])\n else:\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%s %s ' % ('n/a', 'n/a')\n workbook.close()\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n fig, ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel('Date')\n ax.set_ylabel('Snow Depth (mm)')\n plt.xticks(rotation=75)\n plt.tight_layout()\n plt.savefig(debug_directory + 'depth-graph.jpg')\n plt.close()\n return depths, imageSummary\n",
"step-3": "import cv2\nimport xlsxwriter\nimport statistics\nfrom matplotlib import pyplot as plt\nimport math\nimport tqdm\nimport numpy as np\nimport datetime\n\n\ndef getDepths(imgs, img_names, intersectionCoords, stakeValidity,\n templateIntersections, upperBorder, tensors, actualTensors,\n intersectionDist, blobDistTemplate, debug, debug_directory, image_dates,\n imageSummary):\n \"\"\"\n Function to calculate the change in snow depth for each stake using the tensor\n from the specified template\n\n Keyword arguments:\n imgs -- list of input images\n img_names -- list of corresponding image file names\n intersectionCoords -- list containing intersection coordinates for input images\n stakeValidity -- list indicating which stakes in input images are valid\n templateIntersections -- list containing intersection coordinates for template\n upperBorder -- upper crop parameter\n tensors -- tensors from template image\n actualTensors -- tensors calculated for input images\n intersectionDist -- list containing distances from blobs to intersection points\n for input images\n blobDistTemplate -- list containing blob to intersection point distances from\n template\n debug -- bool flag indicating whether output images should be saved\n debug_directory -- directory where output images should be written\n image_dates -- list containing dates of images extracted from EXIF data\n imageSummary -- dictionary containing information about each run\n \"\"\"\n median_depths = list()\n median_depths_est = list()\n depth_output = {}\n num_images = len(imgs)\n depths = dict()\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n worksheet.write(0, 0, 'Image', cell_format)\n worksheet.write(0, 1, 'Date', cell_format)\n worksheet.write(0, len(tensors) + 2, 'Median Depth (mm)', cell_format)\n 
worksheet.write(0, len(tensors) + 3, 'Median Estimate (mm)', cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i + 2, 'Stake %s' % str(i), cell_format)\n row = 1\n col = 0\n iterator = 0\n for img_ in tqdm.tqdm(imgs):\n if debug:\n img_overlay = img_.copy()\n depths_stake = list()\n estimate_stake = list()\n img_name = img_names[iterator]\n col = 0\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime(\n '%x %X'), cell_format)\n col = 2\n coords_stake = intersectionCoords[img_name]\n intersection_dist_stake = intersectionDist[img_name]\n for i, stake in enumerate(coords_stake):\n if stakeValidity[img_name][i] and stake['average'][1] != False:\n if debug:\n cv2.circle(img_overlay, (int(templateIntersections[i][0\n ]), int(templateIntersections[i][1]) - upperBorder),\n 5, (255, 0, 0), 3)\n cv2.circle(img_overlay, (int(stake['average'][0]), int(\n stake['average'][1])), 5, (0, 255, 0), 2)\n tensor = actualTensors[img_name][i] if actualTensors[img_name][\n i] != True else tensors[i]\n depth_change = (templateIntersections[i][1] - upperBorder -\n stake['average'][1]) * tensor\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) -\n abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(\n distances_stake) > 0 else 0\n worksheet.write(row, col + i, '%.2f (%.2f)' % (depth_change,\n distance_estimate), cell_format)\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n else:\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, 'Not Found', cell_format)\n else:\n worksheet.write(row, col + i, 'Invalid Stake', cell_format)\n depths_stake.append(False)\n estimate_stake.append(False)\n if debug:\n cv2.imwrite(debug_directory + img_name, img_overlay)\n depths[img_name] = 
depths_stake\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n if len(valid_depths) > 0:\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n median_depths.append(median)\n median_depths_est.append(median_est)\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, '%.2f' % median, cell_format\n )\n worksheet.write(row, len(tensors) + 3, '%.2f' % median_est,\n cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, '0.0', cell_format)\n worksheet.write(row, len(tensors) + 3, '0.0', cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, 'n/a', cell_format)\n worksheet.write(row, len(tensors) + 3, 'n/a', cell_format)\n row += 1\n iterator += 1\n imageSummary[img_name][' '] = ''\n imageSummary[img_name]['Stake (Depth Calculation)'\n ] = 'Depth (mm) Estimate (mm)'\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%0.2f %0.2f ' % (depth,\n estimate_stake[e])\n else:\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%s %s ' % ('n/a', 'n/a')\n workbook.close()\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n fig, ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel('Date')\n ax.set_ylabel('Snow Depth (mm)')\n plt.xticks(rotation=75)\n plt.tight_layout()\n plt.savefig(debug_directory + 'depth-graph.jpg')\n plt.close()\n return depths, imageSummary\n",
"step-4": "# import necessary modules\nimport cv2\nimport xlsxwriter\nimport statistics\nfrom matplotlib import pyplot as plt\nimport math\nimport tqdm\nimport numpy as np\nimport datetime\n\ndef getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,\n upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,\n image_dates, imageSummary):\n \"\"\"\n Function to calculate the change in snow depth for each stake using the tensor\n from the specified template\n\n Keyword arguments:\n imgs -- list of input images\n img_names -- list of corresponding image file names\n intersectionCoords -- list containing intersection coordinates for input images\n stakeValidity -- list indicating which stakes in input images are valid\n templateIntersections -- list containing intersection coordinates for template\n upperBorder -- upper crop parameter\n tensors -- tensors from template image\n actualTensors -- tensors calculated for input images\n intersectionDist -- list containing distances from blobs to intersection points\n for input images\n blobDistTemplate -- list containing blob to intersection point distances from\n template\n debug -- bool flag indicating whether output images should be saved\n debug_directory -- directory where output images should be written\n image_dates -- list containing dates of images extracted from EXIF data\n imageSummary -- dictionary containing information about each run\n \"\"\"\n\n # list containing median depths for each image\n median_depths = list()\n median_depths_est = list()\n\n # contains output data for JSON file\n depth_output = {}\n\n # num of images\n num_images = len(imgs)\n\n # create output dictionary for images\n depths = dict()\n\n # create excel workbook and add worksheet\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n\n # create 
format\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n\n # add titles\n worksheet.write(0, 0, \"Image\", cell_format)\n worksheet.write(0, 1, \"Date\", cell_format)\n worksheet.write(0, len(tensors) + 2, \"Median Depth (mm)\", cell_format)\n worksheet.write(0, len(tensors) + 3, \"Median Estimate (mm)\", cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i+2, (\"Stake %s\" % str(i)), cell_format)\n\n # start from the first cell\n row = 1\n col = 0\n\n # image iterator\n iterator = 0\n\n # iterate through images\n for img_ in tqdm.tqdm(imgs):\n # create an image to overlay points on if debugging\n if(debug):\n img_overlay = img_.copy()\n\n # list to hold calculated depths\n depths_stake = list()\n estimate_stake = list()\n\n # get image name\n img_name = img_names[iterator]\n\n # reset column\n col = 0\n\n # write to excel file\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)\n col = 2\n\n # get intersection coordiantes\n coords_stake = intersectionCoords[img_name]\n\n # get blob intersection distances\n intersection_dist_stake = intersectionDist[img_name]\n\n # iterate through stakes in image\n for i, stake in enumerate(coords_stake):\n # if stake is valid and intersection point was found\n if stakeValidity[img_name][i] and stake[\"average\"][1] != False:\n # add reference circles to output image if debugging\n # shows intersection point of image with reference to template\n if(debug):\n cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)\n cv2.circle(img_overlay, (int(stake[\"average\"][0]), int(stake[\"average\"][1])), 5, (0,255,0), 2)\n\n # calculate change in snow depth in mm\n tensor = actualTensors[img_name][i] if actualTensors[img_name][i] != True else tensors[i]\n depth_change = 
((templateIntersections[i][1] - upperBorder) - stake[\"average\"][1]) * tensor\n\n # calculate change in snow depth using blob distances\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0\n\n # write to excel file\n worksheet.write(row, col + i, \"%.2f (%.2f)\" % (depth_change, distance_estimate), cell_format)\n\n # add to list\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n\n # if stake wasn't valid or intersection point not found\n else:\n # if stake was valid\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, \"Not Found\", cell_format)\n # invalid stake\n else:\n worksheet.write(row, col + i, \"Invalid Stake\", cell_format)\n\n # append false to array\n depths_stake.append(False)\n estimate_stake.append(False)\n\n # output debug image\n if(debug):\n cv2.imwrite(debug_directory + img_name, img_overlay)\n\n # add list to dictionary\n depths[img_name] = depths_stake\n\n # determine median depth\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n\n if(len(valid_depths) > 0):\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n\n # add to median depth list\n median_depths.append(median)\n median_depths_est.append(median_est)\n\n # write median to excel file\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, \"%.2f\" % median, cell_format)\n worksheet.write(row, len(tensors) + 3, \"%.2f\" % median_est, cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, \"0.0\", cell_format)\n worksheet.write(row, len(tensors) + 3, \"0.0\", cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, \"n/a\", 
cell_format)\n worksheet.write(row, len(tensors) + 3, \"n/a\", cell_format)\n\n # increment row\n row += 1\n\n # increment iterator\n iterator += 1\n\n # update image summary\n imageSummary[img_name][\" \"] = \"\"\n imageSummary[img_name][\"Stake (Depth Calculation)\"] = \"Depth (mm) Estimate (mm)\"\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][\" %d \" % (e+1)] = \"%0.2f %0.2f \" % \\\n (depth, estimate_stake[e])\n else:\n imageSummary[img_name][\" %d \" % (e+1)] = \"%s %s \" % \\\n (\"n/a\", \"n/a\")\n\n # close workbook\n workbook.close()\n\n # remove negative values\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n\n # generate plot\n fig,ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Snow Depth (mm)\")\n plt.xticks(rotation=75)\n plt.tight_layout()\n\n # save figure\n plt.savefig(debug_directory + \"depth-graph.jpg\")\n plt.close()\n\n # return dictionary containing snow depth changes\n return depths, imageSummary\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .dla import get_network as get_dla
from lib.utils.tless import tless_config
_network_factory = {'dla': get_dla}
def get_network(cfg):
    """Build the detection network described by cfg.network.

    The architecture string is either '<arch>' or '<arch>_<num_layers>'
    (e.g. 'dla_34'); the factory registered under <arch> in
    _network_factory is invoked with the layer count, the head
    configuration from cfg, and the template's down ratio.
    """
    arch_spec = cfg.network
    if '_' in arch_spec:
        # split e.g. 'dla_34' into the base name and the layer count
        base_arch = arch_spec[:arch_spec.find('_')]
        layer_count = int(arch_spec[arch_spec.find('_') + 1:])
    else:
        base_arch = arch_spec
        layer_count = 0
    builder = _network_factory[base_arch]
    return builder(layer_count, cfg.heads, cfg.head_conv,
                   tless_config.down_ratio, cfg.det_dir)
|
normal
|
{
"blob_id": "7df94c86ff837acf0f2a78fe1f99919c31bdcb9b",
"index": 4881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_network(cfg):\n arch = cfg.network\n heads = cfg.heads\n head_conv = cfg.head_conv\n num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0\n arch = arch[:arch.find('_')] if '_' in arch else arch\n get_model = _network_factory[arch]\n network = get_model(num_layers, heads, head_conv, tless_config.\n down_ratio, cfg.det_dir)\n return network\n",
"step-3": "<mask token>\n_network_factory = {'dla': get_dla}\n\n\ndef get_network(cfg):\n arch = cfg.network\n heads = cfg.heads\n head_conv = cfg.head_conv\n num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0\n arch = arch[:arch.find('_')] if '_' in arch else arch\n get_model = _network_factory[arch]\n network = get_model(num_layers, heads, head_conv, tless_config.\n down_ratio, cfg.det_dir)\n return network\n",
"step-4": "from .dla import get_network as get_dla\nfrom lib.utils.tless import tless_config\n_network_factory = {'dla': get_dla}\n\n\ndef get_network(cfg):\n arch = cfg.network\n heads = cfg.heads\n head_conv = cfg.head_conv\n num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0\n arch = arch[:arch.find('_')] if '_' in arch else arch\n get_model = _network_factory[arch]\n network = get_model(num_layers, heads, head_conv, tless_config.\n down_ratio, cfg.det_dir)\n return network\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import base64
import string
def hexStringtoBytes(hexstring):
    """Decode a hexadecimal string into the raw bytes it encodes."""
    return bytes.fromhex(hexstring)
def xorBytes(bytes1, bytes2):
    """XOR each byte of bytes1 with the byte at the same index of bytes2.

    The result has the length of bytes1; bytes2 must be at least as long
    (an IndexError propagates otherwise, matching dict-style indexing).
    """
    result = bytearray(len(bytes1))
    for position, value in enumerate(bytes1):
        result[position] = value ^ bytes2[position]
    return bytes(result)
def xorAgainstCharacter(byteArray, character):
    """XOR every byte of byteArray against a single repeating character key."""
    key = ord(character)
    return xorBytes(byteArray, [key] * len(byteArray))
def scoreString(input):
    """Count how many bytes of *input* decode to printable ASCII characters."""
    return sum(1 for value in input if chr(value) in string.printable)
if __name__ == "__main__":
    # Single-byte XOR cipher: XOR the ciphertext against every printable
    # key character, print each candidate plaintext, and score it by how
    # many printable characters it contains.
    hexstring = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'
    ciphertext = hexStringtoBytes(hexstring)
    scores = []
    for key_char in string.printable:
        candidate = xorAgainstCharacter(ciphertext, key_char)
        print(str(key_char), str(candidate))
        scores.append(scoreString(candidate))
|
normal
|
{
"blob_id": "a32fb683f8d46f901e8dcd2d075ace22ee81e076",
"index": 451,
"step-1": "<mask token>\n\n\ndef hexStringtoBytes(hexstring):\n byteArray = bytes.fromhex(hexstring)\n return byteArray\n\n\ndef xorBytes(bytes1, bytes2):\n xored = bytes([(x ^ bytes2[i]) for i, x in enumerate(bytes1)])\n return xored\n\n\n<mask token>\n\n\ndef scoreString(input):\n arr = [(chr(x) in string.printable) for x in input]\n return arr.count(True)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef hexStringtoBytes(hexstring):\n byteArray = bytes.fromhex(hexstring)\n return byteArray\n\n\ndef xorBytes(bytes1, bytes2):\n xored = bytes([(x ^ bytes2[i]) for i, x in enumerate(bytes1)])\n return xored\n\n\ndef xorAgainstCharacter(byteArray, character):\n str2 = [ord(character)] * len(byteArray)\n return xorBytes(byteArray, str2)\n\n\ndef scoreString(input):\n arr = [(chr(x) in string.printable) for x in input]\n return arr.count(True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef hexStringtoBytes(hexstring):\n byteArray = bytes.fromhex(hexstring)\n return byteArray\n\n\ndef xorBytes(bytes1, bytes2):\n xored = bytes([(x ^ bytes2[i]) for i, x in enumerate(bytes1)])\n return xored\n\n\ndef xorAgainstCharacter(byteArray, character):\n str2 = [ord(character)] * len(byteArray)\n return xorBytes(byteArray, str2)\n\n\ndef scoreString(input):\n arr = [(chr(x) in string.printable) for x in input]\n return arr.count(True)\n\n\nif __name__ == '__main__':\n hexstring = (\n '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736')\n bytes1 = hexStringtoBytes(hexstring)\n scores = []\n for x in string.printable:\n temp = xorAgainstCharacter(bytes1, x)\n print(str(x), str(temp))\n scores.append(scoreString(temp))\n",
"step-4": "import base64\nimport string\n\n\ndef hexStringtoBytes(hexstring):\n byteArray = bytes.fromhex(hexstring)\n return byteArray\n\n\ndef xorBytes(bytes1, bytes2):\n xored = bytes([(x ^ bytes2[i]) for i, x in enumerate(bytes1)])\n return xored\n\n\ndef xorAgainstCharacter(byteArray, character):\n str2 = [ord(character)] * len(byteArray)\n return xorBytes(byteArray, str2)\n\n\ndef scoreString(input):\n arr = [(chr(x) in string.printable) for x in input]\n return arr.count(True)\n\n\nif __name__ == '__main__':\n hexstring = (\n '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736')\n bytes1 = hexStringtoBytes(hexstring)\n scores = []\n for x in string.printable:\n temp = xorAgainstCharacter(bytes1, x)\n print(str(x), str(temp))\n scores.append(scoreString(temp))\n",
"step-5": "import base64\r\nimport string\r\ndef hexStringtoBytes(hexstring):\r\n byteArray = bytes.fromhex(hexstring)\r\n return byteArray\r\n\r\ndef xorBytes(bytes1, bytes2):\r\n xored = bytes([x^bytes2[i] for i,x in enumerate(bytes1)])\r\n return xored\r\n\r\ndef xorAgainstCharacter(byteArray, character):\r\n str2 = [ord(character)] * len(byteArray)\r\n return xorBytes(byteArray,str2)\r\n\r\ndef scoreString(input):\r\n arr = [(chr(x) in string.printable) for x in input]\r\n return arr.count(True)\r\n\r\nif __name__ == \"__main__\":\r\n hexstring = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'\r\n bytes1 = hexStringtoBytes(hexstring)\r\n scores = []\r\n for x in string.printable:\r\n temp = xorAgainstCharacter(bytes1, x)\r\n print(str(x), str(temp))\r\n scores.append(scoreString(temp))\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Vector:
def __init__(self, name, type, url_path):
self._name = name
self._conn = sqlite3.connect(url_path)
self._cur = self._conn.cursor()
self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = self._cur.fetchall()
if name not in [val[0] for val in tables]:
self._conn.execute(VEC_TYPES[type].format(self._name.replace(
'"', '""')))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def save(self):
self._conn.commit()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Vector:
def __init__(self, name, type, url_path):
self._name = name
self._conn = sqlite3.connect(url_path)
self._cur = self._conn.cursor()
self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = self._cur.fetchall()
if name not in [val[0] for val in tables]:
self._conn.execute(VEC_TYPES[type].format(self._name.replace(
'"', '""')))
<|reserved_special_token_0|>
def __getitem__(self, index):
self._cur.execute(
"""
SELECT * FROM "{}"
WHERE ID = ?;
"""
.format(self._name.replace('"', '""')), (index,))
try:
return self._cur.fetchall()[0][1]
except Exception as e:
print(e)
return None
def get_multiple(self, keys):
print(keys)
if len(keys) == 0:
return []
keys = [(key,) for key in keys]
print(keys)
self._cur.executemany(
"""
SELECT * FROM "{}"
WHERE ID = ?;
"""
.format(self._name.replace('"', '""')), keys)
try:
a = [val[1] for val in self._cur.fetchall()]
print(a)
return a
except Exception as e:
print(e)
return []
def save(self):
self._conn.commit()
def close(self):
self._conn.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
VEC_TYPES = [
"""
CREATE TABLE "{}"
(ID TEXT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
"""
,
"""
CREATE TABLE "{}"
(ID INT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
"""
]
class Vector:
def __init__(self, name, type, url_path):
self._name = name
self._conn = sqlite3.connect(url_path)
self._cur = self._conn.cursor()
self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = self._cur.fetchall()
if name not in [val[0] for val in tables]:
self._conn.execute(VEC_TYPES[type].format(self._name.replace(
'"', '""')))
def __setitem__(self, index, edges):
try:
self._conn.execute(
"""
INSERT INTO "{}" (ID, num)
VALUES (?, ?);
"""
.format(self._name.replace('"', '""')), (index, edges))
except Exception as e:
print(e)
print('Update Failed')
def __getitem__(self, index):
self._cur.execute(
"""
SELECT * FROM "{}"
WHERE ID = ?;
"""
.format(self._name.replace('"', '""')), (index,))
try:
return self._cur.fetchall()[0][1]
except Exception as e:
print(e)
return None
def get_multiple(self, keys):
print(keys)
if len(keys) == 0:
return []
keys = [(key,) for key in keys]
print(keys)
self._cur.executemany(
"""
SELECT * FROM "{}"
WHERE ID = ?;
"""
.format(self._name.replace('"', '""')), keys)
try:
a = [val[1] for val in self._cur.fetchall()]
print(a)
return a
except Exception as e:
print(e)
return []
def save(self):
self._conn.commit()
def close(self):
self._conn.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import json
import sqlite3
import time
import shelve
import os
from constants import *
VEC_TYPES = [
"""
CREATE TABLE "{}"
(ID TEXT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
"""
,
"""
CREATE TABLE "{}"
(ID INT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
"""
]
class Vector:
def __init__(self, name, type, url_path):
self._name = name
self._conn = sqlite3.connect(url_path)
self._cur = self._conn.cursor()
self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = self._cur.fetchall()
if name not in [val[0] for val in tables]:
self._conn.execute(VEC_TYPES[type].format(self._name.replace(
'"', '""')))
def __setitem__(self, index, edges):
try:
self._conn.execute(
"""
INSERT INTO "{}" (ID, num)
VALUES (?, ?);
"""
.format(self._name.replace('"', '""')), (index, edges))
except Exception as e:
print(e)
print('Update Failed')
def __getitem__(self, index):
self._cur.execute(
"""
SELECT * FROM "{}"
WHERE ID = ?;
"""
.format(self._name.replace('"', '""')), (index,))
try:
return self._cur.fetchall()[0][1]
except Exception as e:
print(e)
return None
def get_multiple(self, keys):
print(keys)
if len(keys) == 0:
return []
keys = [(key,) for key in keys]
print(keys)
self._cur.executemany(
"""
SELECT * FROM "{}"
WHERE ID = ?;
"""
.format(self._name.replace('"', '""')), keys)
try:
a = [val[1] for val in self._cur.fetchall()]
print(a)
return a
except Exception as e:
print(e)
return []
def save(self):
self._conn.commit()
def close(self):
self._conn.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import json
import sqlite3
import time
import shelve
import os
from constants import *
VEC_TYPES = [
    '''
    CREATE TABLE "{}"
    (ID TEXT PRIMARY KEY NOT NULL,
    num TEXT NOT NULL);
    ''',
    '''
    CREATE TABLE "{}"
    (ID INT PRIMARY KEY NOT NULL,
    num TEXT NOT NULL);
    '''
]


class Vector():
    """A persistent, dict-like mapping backed by a single SQLite table.

    ``type`` indexes VEC_TYPES and selects the key column affinity
    (0 -> TEXT keys, 1 -> INT keys).  Writes are buffered until save()
    commits; call close() when done.
    """

    def __init__(self, name, type, url_path):
        self._name = name
        self._conn = sqlite3.connect(url_path)
        self._cur = self._conn.cursor()

        # Create the backing table only if it does not already exist.
        # The table name is an identifier, so it cannot be bound as a
        # parameter; embedded quotes are doubled to keep the SQL valid.
        self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = self._cur.fetchall()
        if name not in [val[0] for val in tables]:
            self._conn.execute(VEC_TYPES[type].format(self._name.replace('"', '""')))

    def __setitem__(self, index, edges):
        """Insert a key/value pair; prints a notice if the insert fails
        (e.g. the key already exists - PRIMARY KEY violation)."""
        try:
            self._conn.execute(
                """
                INSERT INTO "{}" (ID, num)
                VALUES (?, ?);
                """.format(self._name.replace('"', '""')), (index, edges)
            )
        except Exception as e:
            print(e)
            print("Update Failed")

    def __getitem__(self, index):
        """Return the value stored under *index*, or None if absent."""
        self._cur.execute(
            """
            SELECT * FROM "{}"
            WHERE ID = ?;
            """.format(self._name.replace('"', '""')), (index,)
        )
        try:
            return self._cur.fetchall()[0][1]
        except Exception as e:
            print(e)
            return None

    def get_multiple(self, keys):
        """Return the values for *keys* in one query (order not guaranteed;
        missing keys are silently skipped).

        Bug fix: the previous implementation passed a SELECT to
        Cursor.executemany(), which sqlite3 rejects with ProgrammingError
        before the try block could catch anything.  A single
        SELECT ... WHERE ID IN (...) query is used instead.
        """
        if len(keys) == 0:
            return []
        placeholders = ', '.join(['?'] * len(keys))
        query = """
            SELECT * FROM "{}"
            WHERE ID IN ({});
            """.format(self._name.replace('"', '""'), placeholders)
        self._cur.execute(query, list(keys))
        try:
            return [val[1] for val in self._cur.fetchall()]
        except Exception as e:
            print(e)
            return []

    def save(self):
        """Commit all pending writes to the database file."""
        self._conn.commit()

    def close(self):
        """Close the underlying connection (uncommitted writes are lost)."""
        self._conn.close()
"""
vec = Vector("yoav_table", 0, EDGES_VECTOR_PATH)
print(vec[0])
vec[0] = "yo"
print(vec[0])
vec.save()
"""
|
flexible
|
{
"blob_id": "0a6cb6d3fad09ab7f0e19b6c79965315c0e0d634",
"index": 4793,
"step-1": "<mask token>\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n <mask token>\n <mask token>\n <mask token>\n\n def save(self):\n self._conn.commit()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n <mask token>\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nVEC_TYPES = [\n \"\"\"\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ,\n \"\"\"\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ]\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index, edges))\n except Exception as e:\n print(e)\n print('Update Failed')\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-4": "import json\nimport sqlite3\nimport time\nimport shelve\nimport os\nfrom constants import *\nVEC_TYPES = [\n \"\"\"\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ,\n \"\"\"\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ]\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index, edges))\n except Exception as e:\n print(e)\n print('Update Failed')\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-5": "import json\nimport sqlite3\nimport time\nimport shelve\nimport os\n\nfrom constants import *\n\n\nVEC_TYPES = [\n '''\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n ''',\n '''\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n '''\n]\n\n\nclass Vector():\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n\n # check if table exists, if not create TABLE\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace('\"', '\"\"')))\n\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\".format(self._name.replace('\"', '\"\"')), (index, edges)\n )\n except Exception as e:\n print(e)\n print(\"Update Failed\")\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\".format(self._name.replace('\"', '\"\"')), (index,)\n )\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\".format(self._name.replace('\"', '\"\"')), keys\n )\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\"\"\"\nvec = Vector(\"yoav_table\", 0, EDGES_VECTOR_PATH)\nprint(vec[0])\nvec[0] = \"yo\"\nprint(vec[0])\nvec.save()\n\n\"\"\"",
"step-ids": [
3,
6,
8,
9,
10
]
}
|
[
3,
6,
8,
9,
10
] |
import sys
from elftools.elf.elffile import ELFFile
from capstone import *
def process_file(filename):
    """Disassemble an ELF binary's .text section and dump data sections.

    Python 2 script: pyelftools locates the sections, Capstone
    disassembles the x86-64 opcodes.  All output goes to stdout.
    """
    with open(filename, 'rb') as f:
        elffile = ELFFile(f)
        # Sections of interest: executable code plus the common data sections.
        code = elffile.get_section_by_name('.text')
        rodata = elffile.get_section_by_name('.rodata')
        plt = elffile.get_section_by_name('.plt')
        data = elffile.get_section_by_name('.data')
        bss = elffile.get_section_by_name('.bss')

        opcodes = code.data()
        # sh_addr is the virtual load address of .text, so the listing
        # shows real runtime addresses rather than file offsets.
        addr = code['sh_addr']
        #print "Entry point: {0}".format(hex(elffile.header['e_entry']))
        md = Cs(CS_ARCH_X86, CS_MODE_64)
        for i in md.disasm(opcodes, addr):
            print "0x%x:\t%s\t%s\t" %(i.address, i.mnemonic, i.op_str)
        # Raw dumps of the remaining sections (no decoding attempted).
        print "\n\nrodata:\n"
        print rodata.data()
        print "\n\nplt\n"
        print plt.data()
        print "\n\ndata\n"
        print data.data()
        print "\n\nbss\n"
        print bss.data()
if __name__ == '__main__':
    # Usage: python script.py <elf-binary>; silently does nothing otherwise.
    if len(sys.argv) == 2:
        process_file(sys.argv[1])
|
normal
|
{
"blob_id": "5bfaadcd54aaf239d0d89158bfb723c0174c56b1",
"index": 9176,
"step-1": "import sys\nfrom elftools.elf.elffile import ELFFile\nfrom capstone import *\n\ndef process_file(filename):\n with open(filename, 'rb') as f:\n elffile = ELFFile(f)\n code = elffile.get_section_by_name('.text')\n rodata = elffile.get_section_by_name('.rodata')\n plt = elffile.get_section_by_name('.plt')\n data = elffile.get_section_by_name('.data')\n bss = elffile.get_section_by_name('.bss')\n\n opcodes = code.data()\n addr = code['sh_addr']\n #print \"Entry point: {0}\".format(hex(elffile.header['e_entry']))\n md = Cs(CS_ARCH_X86, CS_MODE_64)\n for i in md.disasm(opcodes, addr):\n print \"0x%x:\\t%s\\t%s\\t\" %(i.address, i.mnemonic, i.op_str)\n print \"\\n\\nrodata:\\n\"\n print rodata.data()\n print \"\\n\\nplt\\n\"\n print plt.data()\n print \"\\n\\ndata\\n\"\n print data.data()\n print \"\\n\\nbss\\n\"\n print bss.data()\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n process_file(sys.argv[1])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def get_mask(mask):
r = mask[:, :, 0]
g = mask[:, :, 1]
return r // (r.max() or 1) * -1 + g // (g.max() or 1)
<|reserved_special_token_0|>
def extend(image, mask):
brightness = calculate_brightness(image)
energy = calculate_energy(brightness)
mult = image.shape[0] * image.shape[1] * 256
min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not
None else None)
seam = get_minimal_seam(min_seam)
copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)
copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32
) if mask is not None else None
seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
for row, i in enumerate(seam):
if i >= image.shape[1] - 1:
copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)
if mask is not None:
copy_mask[row] = np.append(mask[row], 0)
copy_mask[row][-2] = 1
copy_mask[row][-1] = 1
else:
copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 +
image[row][i + 1] // 2, axis=0)
if mask is not None:
copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)
copy_mask[row][i] = 1
copy_mask[row][i + 1] = 1
seam_mask[row][i] = 1
return copy, copy_mask, seam_mask
def seam_carve(image, mode, mask):
if mode == 'horizontal shrink':
return cut(image, mask)
elif mode == 'vertical shrink':
transposed_image, transposed_mask, transposed_seam_mask = cut(np.
transpose(image, (1, 0, 2)), mask.T if mask is not None else None)
return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.
T if mask is not None else None, transposed_seam_mask.T)
elif mode == 'horizontal expand':
return extend(image, mask)
else:
transposed_image, transposed_mask, transposed_seam_mask = extend(np
.transpose(image, (1, 0, 2)), mask.T if mask is not None else None)
return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.
T if mask is not None else None, transposed_seam_mask.T)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_mask(mask):
r = mask[:, :, 0]
g = mask[:, :, 1]
return r // (r.max() or 1) * -1 + g // (g.max() or 1)
def calculate_brightness(image):
weights = np.array([0.299, 0.587, 0.114])
brightness_matrix = (image * weights).sum(axis=2)
return brightness_matrix
def calculate_energy(brightness):
x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.
newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1
] - brightness[:, -2])[:, np.newaxis]))
y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness
[2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))
return np.sqrt(x_gradient ** 2 + y_gradient ** 2)
<|reserved_special_token_0|>
def cut(image, mask):
brightness = calculate_brightness(image)
energy = calculate_energy(brightness)
mult = image.shape[0] * image.shape[1] * 256
min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not
None else None)
seam = get_minimal_seam(min_seam)
copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)
copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32
) if mask is not None else None
seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
for row, i in enumerate(seam):
copy[row] = np.delete(image[row], i, axis=0)
if mask is not None:
copy_mask[row] = np.delete(mask[row], i, axis=0)
seam_mask[row][i] = 1
return copy, copy_mask, seam_mask
def extend(image, mask):
brightness = calculate_brightness(image)
energy = calculate_energy(brightness)
mult = image.shape[0] * image.shape[1] * 256
min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not
None else None)
seam = get_minimal_seam(min_seam)
copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)
copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32
) if mask is not None else None
seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
for row, i in enumerate(seam):
if i >= image.shape[1] - 1:
copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)
if mask is not None:
copy_mask[row] = np.append(mask[row], 0)
copy_mask[row][-2] = 1
copy_mask[row][-1] = 1
else:
copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 +
image[row][i + 1] // 2, axis=0)
if mask is not None:
copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)
copy_mask[row][i] = 1
copy_mask[row][i + 1] = 1
seam_mask[row][i] = 1
return copy, copy_mask, seam_mask
def seam_carve(image, mode, mask):
if mode == 'horizontal shrink':
return cut(image, mask)
elif mode == 'vertical shrink':
transposed_image, transposed_mask, transposed_seam_mask = cut(np.
transpose(image, (1, 0, 2)), mask.T if mask is not None else None)
return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.
T if mask is not None else None, transposed_seam_mask.T)
elif mode == 'horizontal expand':
return extend(image, mask)
else:
transposed_image, transposed_mask, transposed_seam_mask = extend(np
.transpose(image, (1, 0, 2)), mask.T if mask is not None else None)
return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.
T if mask is not None else None, transposed_seam_mask.T)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_mask(mask):
r = mask[:, :, 0]
g = mask[:, :, 1]
return r // (r.max() or 1) * -1 + g // (g.max() or 1)
def calculate_brightness(image):
weights = np.array([0.299, 0.587, 0.114])
brightness_matrix = (image * weights).sum(axis=2)
return brightness_matrix
def calculate_energy(brightness):
x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.
newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1
] - brightness[:, -2])[:, np.newaxis]))
y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness
[2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))
return np.sqrt(x_gradient ** 2 + y_gradient ** 2)
def calculate_minimal_seam_matrix(pre_energy, mask=None):
min_seam_searcher = (pre_energy + mask if mask is not None else
pre_energy.copy())
for i in range(1, min_seam_searcher.shape[0]):
row = min_seam_searcher[i - 1]
minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append
(row[1:], row[-1]))).min(axis=0)
min_seam_searcher[i] += minimum
return min_seam_searcher
<|reserved_special_token_0|>
def cut(image, mask):
brightness = calculate_brightness(image)
energy = calculate_energy(brightness)
mult = image.shape[0] * image.shape[1] * 256
min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not
None else None)
seam = get_minimal_seam(min_seam)
copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)
copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32
) if mask is not None else None
seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
for row, i in enumerate(seam):
copy[row] = np.delete(image[row], i, axis=0)
if mask is not None:
copy_mask[row] = np.delete(mask[row], i, axis=0)
seam_mask[row][i] = 1
return copy, copy_mask, seam_mask
def extend(image, mask):
brightness = calculate_brightness(image)
energy = calculate_energy(brightness)
mult = image.shape[0] * image.shape[1] * 256
min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not
None else None)
seam = get_minimal_seam(min_seam)
copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)
copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32
) if mask is not None else None
seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
for row, i in enumerate(seam):
if i >= image.shape[1] - 1:
copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)
if mask is not None:
copy_mask[row] = np.append(mask[row], 0)
copy_mask[row][-2] = 1
copy_mask[row][-1] = 1
else:
copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 +
image[row][i + 1] // 2, axis=0)
if mask is not None:
copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)
copy_mask[row][i] = 1
copy_mask[row][i + 1] = 1
seam_mask[row][i] = 1
return copy, copy_mask, seam_mask
def seam_carve(image, mode, mask):
if mode == 'horizontal shrink':
return cut(image, mask)
elif mode == 'vertical shrink':
transposed_image, transposed_mask, transposed_seam_mask = cut(np.
transpose(image, (1, 0, 2)), mask.T if mask is not None else None)
return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.
T if mask is not None else None, transposed_seam_mask.T)
elif mode == 'horizontal expand':
return extend(image, mask)
else:
transposed_image, transposed_mask, transposed_seam_mask = extend(np
.transpose(image, (1, 0, 2)), mask.T if mask is not None else None)
return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.
T if mask is not None else None, transposed_seam_mask.T)
<|reserved_special_token_1|>
import numpy as np
def get_mask(mask):
    """Collapse an RGB mask image into a scalar map.

    Red-marked pixels become -1 (remove), green-marked pixels +1 (keep),
    everything else 0.  Each channel is normalised by its own maximum
    (``or 1`` guards the all-zero channel against division by zero).
    """
    red = mask[..., 0]
    green = mask[..., 1]
    red_flag = red // (red.max() or 1)
    green_flag = green // (green.max() or 1)
    return green_flag - red_flag
def calculate_brightness(image):
    """Convert an RGB image to per-pixel luma using BT.601 weights."""
    luma_weights = np.array([0.299, 0.587, 0.114])
    return (image * luma_weights).sum(axis=2)
def calculate_energy(brightness):
    """Gradient-magnitude energy map of a brightness image.

    Central differences in the interior and one-sided differences on the
    borders, expressed as edge-replicating padding followed by a single
    shifted subtraction.  This is value-identical to the original
    hstack/vstack formulation on images of at least 2x2, and additionally
    handles single-row / single-column inputs (where the old code raised
    IndexError) by yielding a zero gradient along the degenerate axis.
    """
    padded_x = np.pad(brightness, ((0, 0), (1, 1)), mode='edge')
    padded_y = np.pad(brightness, ((1, 1), (0, 0)), mode='edge')
    x_gradient = padded_x[:, 2:] - padded_x[:, :-2]
    y_gradient = padded_y[2:, :] - padded_y[:-2, :]
    return np.sqrt(x_gradient ** 2 + y_gradient ** 2)
def calculate_minimal_seam_matrix(pre_energy, mask=None):
    """Dynamic-programming cost table for vertical seams.

    dp[i, j] is the cost of the cheapest seam from the top row down to
    pixel (i, j); each cell adds the minimum of its three upper
    neighbours (edges clamp to the row boundary).  The optional mask is
    added to the energy before accumulation.  The input is not modified.
    """
    dp = pre_energy.copy() if mask is None else pre_energy + mask
    for i in range(1, dp.shape[0]):
        prev = dp[i - 1]
        shifted_right = np.insert(prev[:-1], 0, prev[0])
        shifted_left = np.append(prev[1:], prev[-1])
        dp[i] += np.minimum(np.minimum(shifted_right, prev), shifted_left)
    return dp
def get_minimal_seam(min_seam):
    """Backtrack the cheapest vertical seam through a DP cost table.

    Starts from the cheapest cell of the bottom row and walks upward,
    at each step choosing the cheapest of the (up to three) cells
    reachable above, with the window clamped at the row boundaries.
    Returns one column index per row.
    """
    height, width = min_seam.shape
    seam = np.zeros(height, dtype=np.int32)
    seam[-1] = np.argmin(min_seam[-1])
    for i in range(height - 2, -1, -1):
        below = seam[i + 1]
        lo = max(below - 1, 0)
        hi = min(below + 2, width)
        seam[i] = lo + np.argmin(min_seam[i, lo:hi])
    return seam
def cut(image, mask):
    """Remove one minimal-energy vertical seam from *image* (and *mask*).

    Returns (narrowed image, narrowed mask or None, binary seam mask
    marking the removed pixel in each row of the original image).
    """
    energy = calculate_energy(calculate_brightness(image))
    # Multiplier large enough that any masked pixel outweighs every
    # possible accumulated unmasked energy value.
    penalty = image.shape[0] * image.shape[1] * 256
    dp = calculate_minimal_seam_matrix(
        energy, None if mask is None else mask * penalty)
    seam = get_minimal_seam(dp)
    height, width = image.shape[:2]
    narrowed = np.empty((height, width - 1, 3), np.uint8)
    narrowed_mask = None if mask is None else np.empty((height, width - 1), np.int32)
    seam_mask = np.zeros((height, width), dtype=np.uint8)
    for row, col in enumerate(seam):
        narrowed[row] = np.delete(image[row], col, axis=0)
        if mask is not None:
            narrowed_mask[row] = np.delete(mask[row], col, axis=0)
        seam_mask[row][col] = 1
    return narrowed, narrowed_mask, seam_mask
def extend(image, mask):
    """Duplicate one minimal-energy vertical seam, widening the image by 1.

    The inserted pixel is the average of the seam pixel and its right
    neighbour (or a copy of the last pixel at the right edge).  Returns
    (widened image, widened mask or None, binary seam mask over the
    ORIGINAL image's shape marking the duplicated seam).  In the
    returned mask the two copies of the seam pixel are flagged with 1
    so subsequent passes avoid re-expanding the same seam.
    """
    brightness = calculate_brightness(image)
    energy = calculate_energy(brightness)
    # mult makes masked pixels dominate any accumulated energy value.
    mult = image.shape[0] * image.shape[1] * 256
    min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not
        None else None)
    seam = get_minimal_seam(min_seam)
    copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)
    copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32
        ) if mask is not None else None
    seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
    for row, i in enumerate(seam):
        if i >= image.shape[1] - 1:
            # Seam runs along the right edge: append a copy of the last pixel.
            copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)
            if mask is not None:
                copy_mask[row] = np.append(mask[row], 0)
                copy_mask[row][-2] = 1
                copy_mask[row][-1] = 1
        else:
            # Interior seam: insert the average of pixel i and pixel i+1.
            # (// 2 each before summing avoids uint8 overflow.)
            copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 +
                image[row][i + 1] // 2, axis=0)
            if mask is not None:
                copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)
                copy_mask[row][i] = 1
                copy_mask[row][i + 1] = 1
        seam_mask[row][i] = 1
    return copy, copy_mask, seam_mask
def seam_carve(image, mode, mask):
    """Perform one seam-carving step in the requested direction.

    *mode* is one of 'horizontal shrink', 'vertical shrink',
    'horizontal expand'; any other value is treated as vertical expand.
    Vertical modes are implemented by transposing the inputs, running
    the horizontal operation, and transposing all results back.
    """
    def run_transposed(op):
        img_t, mask_t, seam_t = op(np.transpose(image, (1, 0, 2)),
                                   None if mask is None else mask.T)
        return (np.transpose(img_t, (1, 0, 2)),
                None if mask is None else mask_t.T,
                seam_t.T)

    if mode == 'horizontal shrink':
        return cut(image, mask)
    if mode == 'vertical shrink':
        return run_transposed(cut)
    if mode == 'horizontal expand':
        return extend(image, mask)
    return run_transposed(extend)
<|reserved_special_token_1|>
import numpy as np
def get_mask(mask):
    """Convert an RGB annotation image into a signed protection mask.

    Red-marked pixels map to -1 (remove first), green-marked pixels to +1
    (protect), everything else to 0.  The `or 1` guard avoids division by
    zero when a channel is entirely black.
    """
    red = mask[:, :, 0]
    green = mask[:, :, 1]
    red_flags = red // (red.max() or 1)
    green_flags = green // (green.max() or 1)
    return green_flags - red_flags
def calculate_brightness(image):
    """Collapse an RGB image (H, W, 3) to a luminance matrix (H, W).

    Uses the ITU-R BT.601 luma coefficients 0.299 R + 0.587 G + 0.114 B.
    """
    luma_weights = np.array([0.299, 0.587, 0.114])
    return image.dot(luma_weights)
def calculate_energy(brightness):
x_gradient = np.hstack((
(brightness[:, 1] - brightness[:, 0])[:, np.newaxis],
brightness[:, 2:] - brightness[:, :-2],
(brightness[:, -1] - brightness[:, -2])[:, np.newaxis]
))
y_gradient = np.vstack((
brightness[1, :] - brightness[0, :],
brightness[2:, :] - brightness[:-2, :],
brightness[-1, :] - brightness[-2, :]
))
return np.sqrt(x_gradient ** 2 + y_gradient ** 2)
def calculate_minimal_seam_matrix(pre_energy, mask=None):
    """Dynamic-programming table of cumulative minimal seam energies.

    Row i of the result holds, per column, the cheapest total energy of any
    8-connected seam reaching that cell from the top row.  A mask (already
    scaled by the caller) is added up front to bias seams away from protected
    pixels.  The input array is never mutated.
    """
    dp = pre_energy.copy() if mask is None else pre_energy + mask
    for i in range(1, dp.shape[0]):
        prev = dp[i - 1]
        # Left/right neighbors with the edge value repeated at the border.
        left = np.concatenate(([prev[0]], prev[:-1]))
        right = np.concatenate((prev[1:], [prev[-1]]))
        dp[i] += np.minimum(np.minimum(left, prev), right)
    return dp
def get_minimal_seam(min_seam):
    """Backtrack the DP table to one column index per row (the seam).

    Starts at the cheapest cell of the bottom row and, moving upward, picks
    the cheapest of the up-to-three cells adjacent to the previous choice,
    clamping the window at the image borders.  Ties resolve to the leftmost
    candidate (np.argmin semantics), matching the forward DP.
    """
    height, width = min_seam.shape
    seam = np.zeros(height, dtype=np.int32)
    seam[-1] = np.argmin(min_seam[-1])
    for i in range(height - 2, -1, -1):
        prev_col = seam[i + 1]
        lo = max(prev_col - 1, 0)
        hi = min(prev_col + 2, width)
        seam[i] = lo + np.argmin(min_seam[i, lo:hi])
    return seam
def cut(image, mask):
    """Remove one minimal-energy vertical seam from the image.

    Returns a tuple (narrowed image, narrowed mask or None, seam mask) where
    the seam mask marks, in the ORIGINAL coordinates, the removed column of
    each row.  The mask (when given) is scaled by a penalty large enough to
    dominate any accumulated image energy.
    """
    energy = calculate_energy(calculate_brightness(image))
    penalty = image.shape[0] * image.shape[1] * 256
    seam = get_minimal_seam(
        calculate_minimal_seam_matrix(
            energy, None if mask is None else mask * penalty))
    height, width = image.shape[:2]
    carved = np.empty((height, width - 1, 3), np.uint8)
    carved_mask = None if mask is None else np.empty((height, width - 1), np.int32)
    seam_mask = np.zeros((height, width), dtype=np.uint8)
    for row_idx, col in enumerate(seam):
        carved[row_idx] = np.delete(image[row_idx], col, axis=0)
        if mask is not None:
            carved_mask[row_idx] = np.delete(mask[row_idx], col, axis=0)
        seam_mask[row_idx][col] = 1
    return carved, carved_mask, seam_mask
def extend(image, mask):
    """Insert one minimal-energy vertical seam into the image.

    Returns (widened image, widened mask or None, seam mask); the seam mask
    marks the chosen column per row in the ORIGINAL image coordinates.
    """
    brightness = calculate_brightness(image)
    energy = calculate_energy(brightness)
    # Penalty large enough to dominate any accumulated image energy.
    mult = image.shape[0] * image.shape[1] * 256
    min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)
    seam = get_minimal_seam(min_seam)
    copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)
    copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32) if mask is not None else None
    seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)
    for row, i in enumerate(seam):
        if i >= image.shape[1] - 1:
            # Seam at the right edge: duplicate the last pixel of the row.
            copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)
            if mask is not None:
                copy_mask[row] = np.append(mask[row], 0)
                # Flag both straddling pixels so the same seam is not reused.
                copy_mask[row][-2] = 1
                copy_mask[row][-1] = 1
        else:
            # Interior seam: insert the per-channel average of the two neighbors.
            copy[row] = np.insert(image[row], i+1, image[row][i] // 2 + image[row][i+1] // 2, axis=0)
            if mask is not None:
                copy_mask[row] = np.insert(mask[row], i+1, 0, axis=0)
                copy_mask[row][i] = 1
                copy_mask[row][i+1] = 1
        seam_mask[row][i] = 1
    return copy, copy_mask, seam_mask
def seam_carve(image, mode, mask):
    """Apply one seam-carving step selected by *mode*.

    Horizontal modes operate on the image directly; vertical modes transpose
    the image (and mask), run the horizontal operation, and transpose all
    three results back.  Any unrecognized mode falls through to vertical
    expand (the final else branch).
    """
    if mode == 'horizontal shrink':
        return cut(image, mask)
    elif mode == 'vertical shrink':
        transposed_image, transposed_mask, transposed_seam_mask = cut(
            np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None
        )
        return (np.transpose(transposed_image, (1, 0, 2)),
                transposed_mask.T if mask is not None else None,
                transposed_seam_mask.T)
    elif mode == 'horizontal expand':
        return extend(image, mask)
    else:
        transposed_image, transposed_mask, transposed_seam_mask = extend(
            np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None
        )
        return (np.transpose(transposed_image, (1, 0, 2)),
                transposed_mask.T if mask is not None else None,
                transposed_seam_mask.T)
|
flexible
|
{
"blob_id": "7130a382784955780a3f258c81ce05c61915af56",
"index": 5000,
"step-1": "<mask token>\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\n<mask token>\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-2": "<mask token>\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image * weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.\n newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1\n ] - brightness[:, -2])[:, np.newaxis]))\n y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness\n [2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\n<mask token>\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n 
for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-3": "<mask token>\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image * weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.\n newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1\n ] - brightness[:, -2])[:, np.newaxis]))\n y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness\n [2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\ndef calculate_minimal_seam_matrix(pre_energy, mask=None):\n min_seam_searcher = (pre_energy + mask if mask is not None else\n pre_energy.copy())\n for i in range(1, min_seam_searcher.shape[0]):\n row = min_seam_searcher[i - 1]\n minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append\n (row[1:], row[-1]))).min(axis=0)\n min_seam_searcher[i] += minimum\n return min_seam_searcher\n\n\n<mask token>\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = 
image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-4": "import numpy as np\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image * weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack(((brightness[:, 1] - brightness[:, 0])[:, np.\n newaxis], brightness[:, 2:] - brightness[:, :-2], (brightness[:, -1\n ] - brightness[:, -2])[:, np.newaxis]))\n y_gradient = np.vstack((brightness[1, :] - brightness[0, :], brightness\n [2:, :] - brightness[:-2, :], brightness[-1, :] - brightness[-2, :]))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\ndef calculate_minimal_seam_matrix(pre_energy, mask=None):\n min_seam_searcher = (pre_energy + mask if mask is not None else\n pre_energy.copy())\n for i in range(1, min_seam_searcher.shape[0]):\n row = min_seam_searcher[i - 1]\n minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append\n (row[1:], row[-1]))).min(axis=0)\n min_seam_searcher[i] += minimum\n return min_seam_searcher\n\n\ndef get_minimal_seam(min_seam):\n seam = np.zeros(min_seam.shape[0], dtype=np.int32)\n seam[-1] = np.argmin(min_seam[-1])\n for i in range(min_seam.shape[0] - 2, -1, -1):\n last = seam[i + 1]\n if last == 0:\n seam[i] = np.argmin(min_seam[i, :2])\n elif last == min_seam.shape[1] - 1:\n seam[i] = last + np.argmin(min_seam[i, last - 1:]) - 1\n else:\n seam[i] = last + np.argmin(min_seam[i, last - 1:last + 2]) - 1\n return seam\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32\n ) if 
mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not\n None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32\n ) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i + 1, image[row][i] // 2 + \n image[row][i + 1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i + 1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i + 1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(np.\n transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return (np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(np\n .transpose(image, (1, 0, 2)), mask.T if mask is not None else None)\n return 
(np.transpose(transposed_image, (1, 0, 2)), transposed_mask.\n T if mask is not None else None, transposed_seam_mask.T)\n",
"step-5": "import numpy as np\n\n\ndef get_mask(mask):\n r = mask[:, :, 0]\n g = mask[:, :, 1]\n return r // (r.max() or 1) * -1 + g // (g.max() or 1)\n\n\ndef calculate_brightness(image):\n weights = np.array([0.299, 0.587, 0.114])\n brightness_matrix = (image*weights).sum(axis=2)\n return brightness_matrix\n\n\ndef calculate_energy(brightness):\n x_gradient = np.hstack((\n (brightness[:, 1] - brightness[:, 0])[:, np.newaxis],\n brightness[:, 2:] - brightness[:, :-2],\n (brightness[:, -1] - brightness[:, -2])[:, np.newaxis]\n ))\n y_gradient = np.vstack((\n brightness[1, :] - brightness[0, :],\n brightness[2:, :] - brightness[:-2, :],\n brightness[-1, :] - brightness[-2, :]\n ))\n return np.sqrt(x_gradient ** 2 + y_gradient ** 2)\n\n\ndef calculate_minimal_seam_matrix(pre_energy, mask=None):\n min_seam_searcher = pre_energy + mask if mask is not None else pre_energy.copy()\n for i in range(1, min_seam_searcher.shape[0]):\n row = min_seam_searcher[i-1]\n minimum = np.vstack((np.insert(row[:-1], 0, row[0]), row, np.append(row[1:], row[-1]))).min(axis=0)\n min_seam_searcher[i] += minimum\n return min_seam_searcher\n\n\ndef get_minimal_seam(min_seam):\n seam = np.zeros(min_seam.shape[0], dtype=np.int32)\n seam[-1] = np.argmin(min_seam[-1])\n for i in range(min_seam.shape[0] - 2, -1, -1):\n last = seam[i+1]\n if last == 0:\n seam[i] = np.argmin(min_seam[i, : 2])\n elif last == min_seam.shape[1] - 1:\n seam[i] = last + np.argmin(min_seam[i, (last - 1):]) - 1\n else:\n seam[i] = last + np.argmin(min_seam[i, (last - 1): (last + 2)]) - 1\n return seam\n\n\ndef cut(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] - 1, 3), np.uint8)\n copy_mask = np.empty((image.shape[0], image.shape[1] - 1), np.int32) if 
mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n copy[row] = np.delete(image[row], i, axis=0)\n if mask is not None:\n copy_mask[row] = np.delete(mask[row], i, axis=0)\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef extend(image, mask):\n brightness = calculate_brightness(image)\n energy = calculate_energy(brightness)\n mult = image.shape[0] * image.shape[1] * 256\n min_seam = calculate_minimal_seam_matrix(energy, mask * mult if mask is not None else None)\n seam = get_minimal_seam(min_seam)\n copy = np.empty((image.shape[0], image.shape[1] + 1, 3), np.uint8)\n copy_mask = np.zeros((image.shape[0], image.shape[1] + 1), np.int32) if mask is not None else None\n seam_mask = np.zeros(image.shape[:2], dtype=np.uint8)\n for row, i in enumerate(seam):\n if i >= image.shape[1] - 1:\n copy[row] = np.concatenate((image[row], [image[row][-1]]), axis=0)\n if mask is not None:\n copy_mask[row] = np.append(mask[row], 0)\n copy_mask[row][-2] = 1\n copy_mask[row][-1] = 1\n else:\n copy[row] = np.insert(image[row], i+1, image[row][i] // 2 + image[row][i+1] // 2, axis=0)\n if mask is not None:\n copy_mask[row] = np.insert(mask[row], i+1, 0, axis=0)\n copy_mask[row][i] = 1\n copy_mask[row][i+1] = 1\n seam_mask[row][i] = 1\n return copy, copy_mask, seam_mask\n\n\ndef seam_carve(image, mode, mask):\n if mode == 'horizontal shrink':\n return cut(image, mask)\n elif mode == 'vertical shrink':\n transposed_image, transposed_mask, transposed_seam_mask = cut(\n np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None\n )\n return (np.transpose(transposed_image, (1, 0, 2)),\n transposed_mask.T if mask is not None else None,\n transposed_seam_mask.T)\n elif mode == 'horizontal expand':\n return extend(image, mask)\n else:\n transposed_image, transposed_mask, transposed_seam_mask = extend(\n np.transpose(image, (1, 0, 2)), mask.T if mask is not None else None\n )\n return 
(np.transpose(transposed_image, (1, 0, 2)),\n transposed_mask.T if mask is not None else None,\n transposed_seam_mask.T)\n",
"step-ids": [
3,
6,
7,
9,
10
]
}
|
[
3,
6,
7,
9,
10
] |
env = 'DEV'  # DataHub environment tag embedded in dataset URNs; must be uppercase
platform = 'hive'  # dataPlatform segment of the dataset URN (NOTE: shadows the stdlib `platform` module)
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.ingestion.extractor.schema_util import *
from datahub.metadata.schema_classes import (
DatasetSnapshotClass,
MetadataChangeEventClass,
OwnerClass,
OwnershipClass,
OwnershipTypeClass,
)
source_file_path = '/Users/snandi/Downloads/data/owner_data.json'
# created an emitter where the mce will be emitted, it will be DataHub's Kafka broker in docker (for PoC)
# emitter = DatahubKafkaEmitter(
# KafkaEmitterConfig.parse_obj(
# # This is the same config format as the standard Kafka sink's YAML.
# {
# "connection": {
# "bootstrap": "localhost:9002",
# "producer_config": {},
# "schema_registry_url": "localhost:8081",
# }
# }
# )
# )
# TODO: 1. Maintain a living document of table ownership. 2. If that document is a Google Doc,
# create an OAuth or service account to access the sheet programmatically.
import json
# Parse the JSON-lines source: one dict per line. Each record must carry
# 'Email' (and, per add_owner_mce below, 'Schema' and 'Table').
recs = []
with open(source_file_path, 'r') as f:
    for _i in f:
        row = json.loads(_i.rstrip('\n'))
        Email= row['Email']
        # Attach the DataHub owner URN derived from the email address.
        row['Owner'] = [f"urn:li:corpuser:{Email}"]
        recs.append(row)
# recs = [{'schema_name': 'integrated_core', 'table_name': 'order_fact', 'owner': ["urn:li:corpuser:hsk@grubhub.com"]}]
# Process messages
def add_owner_mce(m) -> MetadataChangeEventClass:
    """Build a MetadataChangeEvent assigning DATAOWNER ownership to a dataset.

    *m* is a record with 'Schema', 'Table', and a list of owner URNs under
    'Owner'.  The dataset URN is assembled from the module-level `platform`
    and `env` settings.
    """
    dataset_name = f"{m['Schema']}.{m['Table']}"
    dataset_urn = (
        f"urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})"
    )
    ownership = OwnershipClass([
        OwnerClass(owner=owner_urn, type=OwnershipTypeClass.DATAOWNER)
        for owner_urn in m['Owner']
    ])
    snapshot = DatasetSnapshotClass(urn=dataset_urn, aspects=[ownership])
    return MetadataChangeEventClass(proposedSnapshot=snapshot)
def callback(err, msg):
    """Delivery callback for async emission: log progress and any error."""
    print('ingested row')
    if not err:
        return
    # Handle the metadata emission error.
    print("error:", err)
num_recs = len(recs)
# try REST emitter (the Kafka emitter commented out above is the alternative path)
Restemitter = DatahubRestEmitter("http://10.174.24.179:8080")  # NOTE(review): hard-coded GMS host — consider making configurable
for _i in range(num_recs):
    print('sending data to datahub')
    mce = add_owner_mce(recs[_i])
    print(mce)
    # emit the mce to kafka
    # emitter.emit_mce_async(mce, callback)
    # emitter.flush()
    # emit mce to REST
    Restemitter.emit_mce(mce)
    num_recs -= 1  # NOTE(review): no effect — range(num_recs) was evaluated once; this decrement is dead code
|
normal
|
{
"blob_id": "7ad5e803afa42790e878bfb923eddcfde2d21928",
"index": 1501,
"step-1": "<mask token>\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email = row['Email']\n row['Owner'] = [f'urn:li:corpuser:{Email}']\n recs.append(row)\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\n<mask token>\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n Restemitter.emit_mce(mce)\n num_recs -= 1\n",
"step-3": "env = 'DEV'\nplatform = 'hive'\n<mask token>\nsource_file_path = '/Users/snandi/Downloads/data/owner_data.json'\n<mask token>\nrecs = []\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email = row['Email']\n row['Owner'] = [f'urn:li:corpuser:{Email}']\n recs.append(row)\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\nnum_recs = len(recs)\nRestemitter = DatahubRestEmitter('http://10.174.24.179:8080')\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n Restemitter.emit_mce(mce)\n num_recs -= 1\n",
"step-4": "env = 'DEV'\nplatform = 'hive'\nfrom datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig\nfrom datahub.emitter.rest_emitter import DatahubRestEmitter\nfrom datahub.ingestion.extractor.schema_util import *\nfrom datahub.metadata.schema_classes import DatasetSnapshotClass, MetadataChangeEventClass, OwnerClass, OwnershipClass, OwnershipTypeClass\nsource_file_path = '/Users/snandi/Downloads/data/owner_data.json'\nimport json\nrecs = []\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email = row['Email']\n row['Owner'] = [f'urn:li:corpuser:{Email}']\n recs.append(row)\n\n\ndef add_owner_mce(m) ->MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f'{schema}.{entity}'\n owners = [OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER) for\n owner in m['Owner']]\n changed_snapshot = DatasetSnapshotClass(urn=\n f'urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})'\n , aspects=[])\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n print('error:', err)\n\n\nnum_recs = len(recs)\nRestemitter = DatahubRestEmitter('http://10.174.24.179:8080')\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n Restemitter.emit_mce(mce)\n num_recs -= 1\n",
"step-5": "\nenv = 'DEV' ## this had to be in uppercase\nplatform = 'hive'\n\nfrom datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig\nfrom datahub.emitter.rest_emitter import DatahubRestEmitter\n\nfrom datahub.ingestion.extractor.schema_util import *\n\nfrom datahub.metadata.schema_classes import (\n DatasetSnapshotClass,\n MetadataChangeEventClass,\n OwnerClass,\n OwnershipClass,\n OwnershipTypeClass,\n)\n\nsource_file_path = '/Users/snandi/Downloads/data/owner_data.json'\n\n# created an emitter where the mce will be emitted, it will be DataHub's Kafka broker in docker (for PoC)\n# emitter = DatahubKafkaEmitter(\n# KafkaEmitterConfig.parse_obj(\n# # This is the same config format as the standard Kafka sink's YAML.\n# {\n# \"connection\": {\n# \"bootstrap\": \"localhost:9002\",\n# \"producer_config\": {},\n# \"schema_registry_url\": \"localhost:8081\",\n# }\n# }\n# )\n# )\n\n\n# todo: 1. We have to make a living doc of table ownership 2. If we decide that to be google doc,\n# then create an Oauth or service account to access the sheet programatically\n\nimport json\nrecs = []\nwith open(source_file_path, 'r') as f:\n for _i in f:\n row = json.loads(_i.rstrip('\\n'))\n Email= row['Email']\n row['Owner'] = [f\"urn:li:corpuser:{Email}\"]\n recs.append(row)\n\n# recs = [{'schema_name': 'integrated_core', 'table_name': 'order_fact', 'owner': [\"urn:li:corpuser:hsk@grubhub.com\"]}]\n\n\n# Process messages\ndef add_owner_mce(m) -> MetadataChangeEventClass:\n entity = m['Table']\n schema = m['Schema']\n dataset_name = f\"{schema}.{entity}\"\n\n owners = [\n OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER)\n for owner in m['Owner']\n ]\n\n changed_snapshot = DatasetSnapshotClass(\n urn=f\"urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},{env})\",\n aspects=[], # we append to this list later on\n )\n changed_snapshot.aspects.append(OwnershipClass(owners))\n mce = MetadataChangeEventClass(proposedSnapshot=changed_snapshot)\n 
return mce\n\n\ndef callback(err, msg):\n print('ingested row')\n if err:\n # Handle the metadata emission error.\n print(\"error:\", err)\n\n\nnum_recs = len(recs)\n\n# try REST emitter\nRestemitter = DatahubRestEmitter(\"http://10.174.24.179:8080\")\n\n\nfor _i in range(num_recs):\n print('sending data to datahub')\n mce = add_owner_mce(recs[_i])\n print(mce)\n # emit the mce to kafka\n # emitter.emit_mce_async(mce, callback)\n # emitter.flush()\n # emit mce to REST\n Restemitter.emit_mce(mce)\n num_recs -= 1\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#coding=utf-8
from django.contrib import admin
from models import *
# Extra admin action (attached to OrderInfoAdmin below)
def make_published(modeladmin, request, queryset):
    """Admin bulk action: mark every selected order as published.

    Sets state=1 on all selected rows with a single UPDATE query.
    """
    queryset.update(state=1)
# Human-readable label shown in the admin actions dropdown (Django convention
# for admin actions; without it the label is derived from the function name).
make_published.short_description = "Mark selected orders as published"
class OrderInfoAdmin(admin.ModelAdmin):
    """Admin configuration for orders."""
    # Columns shown in the change-list view.
    list_display = ('ordernum', 'total', 'state')
    # Enable the admin search box over the order total.
    search_fields = ('total', )
    # Sidebar filter on the publication date.
    list_filter = ('bpub_date',)
    # Bulk action defined above: mark selected orders as published (state=1).
    actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
    """Admin configuration for addresses."""
    # Hide the soft-delete flag from the admin edit form.
    exclude = ('isDelete',)
# Register the shop models with the admin site (added 2017/01/05).
admin.site.register(cart)
admin.site.register(address_info,address_infoAdmin)
admin.site.register(OrderInfo,OrderInfoAdmin)
admin.site.register(OrderDetailInfo)
admin.site.register(GoodsInfo)
|
normal
|
{
"blob_id": "74a0282495bf4bbd34b397e0922074659a66d6ff",
"index": 4809,
"step-1": "<mask token>\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\nadmin.site.register(cart)\nadmin.site.register(address_info, address_infoAdmin)\nadmin.site.register(OrderInfo, OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n",
"step-4": "from django.contrib import admin\nfrom models import *\n\n\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = 'ordernum', 'total', 'state'\n search_fields = 'total',\n list_filter = 'bpub_date',\n actions = [make_published]\n\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = 'isDelete',\n\n\nadmin.site.register(cart)\nadmin.site.register(address_info, address_infoAdmin)\nadmin.site.register(OrderInfo, OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n",
"step-5": "#coding=utf-8\nfrom django.contrib import admin\nfrom models import *\n\n#增加额外的方法\ndef make_published(modeladmin, request, queryset):\n queryset.update(state=1)\n\nclass OrderInfoAdmin(admin.ModelAdmin):\n list_display = ('ordernum', 'total', 'state')\n search_fields = ('total', )\n list_filter = ('bpub_date',)\n actions = [make_published]\n\nclass address_infoAdmin(admin.ModelAdmin):\n exclude = ('isDelete',)\n\n\n#2017/1/05注册admin站点\nadmin.site.register(cart)\nadmin.site.register(address_info,address_infoAdmin)\nadmin.site.register(OrderInfo,OrderInfoAdmin)\nadmin.site.register(OrderDetailInfo)\nadmin.site.register(GoodsInfo)\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 2.2 on 2020-10-26 15:16
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: only updates the model's Meta display options
    # (verbose names); it performs no database schema change.

    dependencies = [
        ('viajes', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='viajes',
            options={'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'},
        ),
    ]
|
normal
|
{
"blob_id": "760a5a168575a0ea12b93cb58c1e81e313704e35",
"index": 6276,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('viajes', '0001_initial')]\n operations = [migrations.AlterModelOptions(name='viajes', options={\n 'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})\n ]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('viajes', '0001_initial')]\n operations = [migrations.AlterModelOptions(name='viajes', options={\n 'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})\n ]\n",
"step-5": "# Generated by Django 2.2 on 2020-10-26 15:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('viajes', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='viajes',\n options={'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/bin/env python3
import os
##print(os.environ)
##print("**********************************************************************")
##print("**********************************************************************")
##print("**********************************************************************")
##print(str(os.environ.get('PYTHONPATH')))
##print(str(os.environ.get('PYTHON3PATH')))
import sys
##print("*************** This is system version info ***************************")
##print(sys.version_info)
#import platform
##print("*************** This is python version info ***************************")
##print(platform.python_version())
import ROOT
##print("Succesfully imported ROOT")
import math
import datetime
import copy
from array import array
from skimtree_utils_ssWW_wFakes_old import *
# Select the sample catalogue matching the campaign (legacy vs "_UL" Ultra
# Legacy samples) and the execution mode:
#   sys.argv[1] = sample key, sys.argv[2] = output part index,
#   sys.argv[3] = "[file1,file2,...]", sys.argv[4] = 'remote' (batch) or local.
# Local (non-'remote') running also enables Debug mode (short event loop).
if not "_UL" in sys.argv[1]:
    if sys.argv[4] == 'remote':
        from samples import *
        Debug = False
    else:
        from samples.samples import *
        Debug = True
else:
    if sys.argv[4] == 'remote':
        from samplesUL import *
        Debug = False
    else:
        from samples.samplesUL import *
        Debug = True
# sample_dict is provided by the star import above.
sample = sample_dict[sys.argv[1]]
part_idx = sys.argv[2]
# sys.argv[3] is a bracketed, comma-separated list of input ROOT files.
file_list = list(map(str, sys.argv[3].strip('[]').split(',')))
def AddOverflow(h):
    """Fold the overflow bins of a 2D histogram into its outermost regular bins.

    For each x-column the content of the y-overflow bin (bin index nybins+1)
    is added into the topmost regular y bin; for each y-row the content of
    the x-overflow bin (bin index nxbins+1) is added into the rightmost
    regular x bin.  The histogram *h* is modified in place.  The corner
    overflow bin (nxbins+1, nybins+1) is left untouched, as before.
    """
    n_x = h.GetXaxis().GetNbins()
    n_y = h.GetYaxis().GetNbins()
    # Merge the y-overflow row into the last regular y bin, column by column.
    for col in range(1, n_x + 1):
        merged = h.GetBinContent(col, n_y + 1) + h.GetBinContent(col, n_y)
        h.SetBinContent(col, n_y, merged)
    # Merge the x-overflow column into the last regular x bin, row by row.
    for row in range(1, n_y + 1):
        merged = h.GetBinContent(n_x + 1, row) + h.GetBinContent(n_x, row)
        h.SetBinContent(n_x, row, merged)
# --- run setup -------------------------------------------------------------
startTime = datetime.datetime.now()
print("Starting running at " + str(startTime))
ROOT.gROOT.SetBatch()  # batch mode: no GUI windows
leadingjet_ptcut = 150.  # GeV; not used in this efficiency-only pass
# Chain all input files into a single 'Events' tree.
chain = ROOT.TChain('Events')
for infile in file_list:
    chain.Add(infile)
print("Number of events in chain " + str(chain.GetEntries()))
# InputTree comes from the skimtree utils star-import.
tree = InputTree(chain)
print("Number of entries: " +str(tree.GetEntries()))
# Flags derived from the sample name.
isMC = True
if ('Data' in sample.name):
    isMC = False
IsDim8 = False
if 'aQGC' in sample.name:
    IsDim8 = True
dataEle = False
dataMu = False
if 'DataMu' in sample.name:
    dataMu = True
if 'DataEle' in sample.name:
    dataEle = True
username = str(os.environ.get('USER'))
inituser = str(os.environ.get('USER')[0])
# Output ROOT file holding the efficiency histograms for this part.
outTreeFile = ROOT.TFile(sample.label+"_part"+str(part_idx)+".root", "RECREATE")
# --- binning for the b-tag efficiency maps ---------------------------------
ptNBins = 100
ptMin = 0
ptMax = 1000.
etaNBins = 60
etaMin = -3.
etaMax = 3.
# NOTE(review): 'from array import array' binds the *class*, so array.array(...)
# would normally raise AttributeError; this appears to rely on the skimtree
# utils star-import re-binding 'array' to the module -- confirm.
ptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])
etabins = array.array('d', [0.0, 0.8, 1.6, 2.4])
nptbins = len(ptbins)-1
netabins = len(etabins)-1
# Denominator (all MC jets) and numerator (b-tagged jets) maps, one pair per
# true jet flavour (b / c / light).
h2_BTaggingEff_Denom_b = ROOT.TH2D("h2_BTaggingEff_Denom_b", "MC bjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_c = ROOT.TH2D("h2_BTaggingEff_Denom_c", "MC cjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_udsg = ROOT.TH2D("h2_BTaggingEff_Denom_udsg", "MC ljet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_b = ROOT.TH2D("h2_BTaggingEff_Num_b", "Tagged bjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_c = ROOT.TH2D("h2_BTaggingEff_Num_c", "Tagged cjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_udsg = ROOT.TH2D("h2_BTaggingEff_Num_udsg", "Tagged ljet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
# Track sum of squared weights per bin for correct statistical errors.
h2_BTaggingEff_Denom_b.Sumw2()
h2_BTaggingEff_Denom_c.Sumw2()
h2_BTaggingEff_Denom_udsg.Sumw2()
h2_BTaggingEff_Num_b.Sumw2()
h2_BTaggingEff_Num_c.Sumw2()
h2_BTaggingEff_Num_udsg.Sumw2()
#++++++++++++++++++++++++++++++++++
#++ looping over the events ++
#++++++++++++++++++++++++++++++++++
# ---------------------------------------------------------------------------
# Event loop: for every selected jet, fill the denominator map of its true
# flavour, and the numerator map when the jet passes the DeepFlavour medium
# b-tag working point.
# ---------------------------------------------------------------------------
for i in range(tree.GetEntries()):
    # In local/debug mode only process ~100 events.
    if Debug:
        if i > 100:
            break
    if not Debug and i%5000 == 0:
        print("Event #", i+1, " out of ", tree.GetEntries())
    event = Event(tree,i)
    # NanoAOD object collections (Event/Collection/Object helpers come from
    # the skimtree utils star-import).
    electrons = Collection(event, "Electron")
    muons = Collection(event, "Muon")
    jets = Collection(event, "Jet")
    njets = len(jets)
    fatjets = Collection(event, "FatJet")
    HLT = Object(event, "HLT")
    PV = Object(event, "PV")
    Flag = Object(event, 'Flag')
    # Scale-factor / lepton placeholders (unused in this efficiency-only pass).
    tightlep = None
    tightlep_p4 = None
    tightlep_p4t = None
    tightlep_SF = None
    tightlep_SFUp = None
    tightlep_SFDown = None
    recomet_p4t = None
    PF_SF = None
    PF_SFUp = None
    PF_SFDown = None
    PU_SF = None
    PU_SFUp = None
    PU_SFDown = None
    year = sample.year
    if(isMC):
        runPeriod = ''
    else:
        runPeriod = sample.runP
    # Data-only event-quality filter.
    if not isMC:
        if not Flag.eeBadScFilter:
            continue
    # Trigger decision per channel (trig_map from the skimtree utils).
    passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod, Flag)
    if noTrigger:
        continue
    # The triple-quoted block below is disabled lepton-selection code kept as
    # dead text; left verbatim.
    '''
    GoodEle, ele_TightRegion = SelectLepton(electrons, False)
    GoodMu, mu_TightRegion = SelectLepton(muons, True)

    if GoodEle is None and GoodMu is None:
        continue

    ele_lepton_veto = -1
    mu_lepton_veto = -1

    if GoodEle != None:
        ele_lepton_veto = LepVeto(GoodEle, electrons, muons)
    if GoodMu != None:
        mu_lepton_veto = LepVeto(GoodMu, electrons, muons)

    SingleEle=False
    SingleMu=False
    ElMu=False

    LeadLepFamily="not selected"

    GoodLep = None
    leptons = None

    lepton_TightRegion = 0

    if 'DataHT' not in sample.label:
        if passEle and not passMu:
            if GoodEle != None and ele_lepton_veto:
                GoodLep = GoodEle
                lepton_TightRegion = copy.deepcopy(ele_TightRegion)
                SingleEle = True
                SingleMu = False
            else:
                continue

        elif passMu and not passEle:
            if GoodMu != None and mu_lepton_veto:
                GoodLep = GoodMu
                lepton_TightRegion = copy.deepcopy(mu_TightRegion)
                SingleEle = False
                SingleMu = True
            else:
                continue

        elif passMu and passEle:
            ElMu=True

        else:
            continue


    else:
        if passHT:
            ElMu = True
        else:
            continue

    if ElMu:
        if GoodMu==None and GoodEle!=None and ele_lepton_veto:
            GoodLep = GoodEle
            lepton_TightRegion = copy.deepcopy(ele_TightRegion)
            SingleEle = True
            SingleMu = False

        elif GoodMu!=None and mu_lepton_veto and GoodEle==None:
            GoodLep = GoodMu
            lepton_TightRegion = copy.deepcopy(mu_TightRegion)
            SingleMu = True
            SingleEle = False

        elif GoodMu!=None and GoodEle!=None:
            if ele_lepton_veto and not mu_lepton_veto:
                GoodLep = GoodEle
                lepton_TightRegion = copy.deepcopy(ele_TightRegion)
                SingleEle = True
                SingleMu = False
            elif not ele_lepton_veto and mu_lepton_veto:
                GoodLep = GoodMu
                lepton_TightRegion = copy.deepcopy(mu_TightRegion)
                SingleMu = True
                SingleEle = False

            elif ele_lepton_veto and mu_lepton_veto:
                if GoodEle.pt > GoodMu.pt:
                    GoodLep = GoodEle
                    lepton_TightRegion = copy.deepcopy(ele_TightRegion)
                    SingleEle = True
                    SingleMu = False
                else:
                    GoodLep = GoodMu
                    lepton_TightRegion = copy.deepcopy(mu_TightRegion)
                    SingleMu = True
                    SingleEle = False

            else:
                continue

        else:
            continue

    vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)

    if SingleEle==True:
        if isMC:
            HLT_effLumi = lumiFinder("Ele", vTrigEle, sample.year)
        leptons = electrons
    elif SingleMu==True:
        if isMC:
            HLT_effLumi = lumiFinder("Mu", vTrigMu, sample.year)
        leptons = muons

    elif not (SingleMu or SingleEle):
        continue

    if SingleEle and dataMu:
        continue
    if SingleMu and dataEle:
        continue

    if GoodLep==None or (lepton_TightRegion < 1):
        if Debug:
            print("exiting at lepton selection (without saving)")
        continue
    '''
    # Jets with pt > 30 GeV; split into b-tagged (DeepFlavour medium) and rest.
    goodJets = get_Jet(jets, 30)
    bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')
    # Event selection: at least two AK4 jets and two AK8 (fat) jets.
    if (len(goodJets) < 2 or len(fatjets) < 2):
        continue
    for jet in goodJets:
        if(abs(jet.partonFlavour) == 5):  # true b jet
            h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))
            if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):
                h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))
        elif(abs(jet.partonFlavour) == 4):  # true c jet
            h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))
            if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):
                h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))
        else:  # light (udsg) jet
            h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))
            if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):
                h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))
# Persist the raw numerator/denominator maps, then build TEfficiency objects
# (passed/total) per flavour and write those too.
outTreeFile.cd()
h2_BTaggingEff_Denom_b.Write()
h2_BTaggingEff_Denom_c.Write()
h2_BTaggingEff_Denom_udsg.Write()
h2_BTaggingEff_Num_b.Write()
h2_BTaggingEff_Num_c.Write()
h2_BTaggingEff_Num_udsg.Write()
h2_Eff_b = ROOT.TEfficiency("h2_BTaggingEff_b", "bjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, "")
h2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, "")
h2_Eff_c = ROOT.TEfficiency("h2_BTaggingEff_c", "cjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, "")
h2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, "")
h2_Eff_udsg = ROOT.TEfficiency("h2_BTaggingEff_udsg", "light jet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, "")
h2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, "")
h2_Eff_b.Write()
h2_Eff_c.Write()
h2_Eff_udsg.Write()
# NOTE(review): outTreeFile is never Close()'d; ROOT typically flushes at
# process exit, but an explicit Close() would be safer -- confirm.
endTime = datetime.datetime.now()
print("Ending running at " + str(endTime))
|
normal
|
{
"blob_id": "b49696d6cac5fbf97172aa7cf16903d002262b5c",
"index": 1940,
"step-1": "<mask token>\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\n<mask token>\n",
"step-2": "<mask token>\nif not '_UL' in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelif sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\nelse:\n from samples.samplesUL import *\n Debug = True\n<mask token>\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\n<mask token>\nprint('Starting running at ' + str(startTime))\nROOT.gROOT.SetBatch()\n<mask token>\nfor infile in file_list:\n chain.Add(infile)\nprint('Number of events in chain ' + str(chain.GetEntries()))\n<mask token>\nprint('Number of entries: ' + str(tree.GetEntries()))\n<mask token>\nif 'Data' in sample.name:\n isMC = False\n<mask token>\nif 'aQGC' in sample.name:\n IsDim8 = True\n<mask token>\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\n<mask token>\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\nfor i in range(tree.GetEntries()):\n if Debug:\n if i > 100:\n break\n if not Debug and i % 5000 == 0:\n print('Event #', i + 1, ' out of ', tree.GetEntries())\n event = Event(tree, i)\n electrons = Collection(event, 'Electron')\n muons = Collection(event, 'Muon')\n jets = Collection(event, 'Jet')\n njets = len(jets)\n fatjets = 
Collection(event, 'FatJet')\n HLT = Object(event, 'HLT')\n PV = Object(event, 'PV')\n Flag = Object(event, 'Flag')\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n year = sample.year\n if isMC:\n runPeriod = ''\n else:\n runPeriod = sample.runP\n if not isMC:\n if not Flag.eeBadScFilter:\n continue\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,\n Flag)\n if noTrigger:\n continue\n \"\"\"\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = 
copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n \"\"\"\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n if len(goodJets) < 2 or len(fatjets) < 2:\n continue\n for jet in goodJets:\n if abs(jet.partonFlavour) == 5:\n h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif abs(jet.partonFlavour) == 4:\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 
'M')[0]) == 1:\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\n<mask token>\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, '')\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, '')\n<mask token>\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, '')\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, '')\n<mask token>\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, '')\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, '')\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\n<mask token>\nprint('Ending running at ' + str(endTime))\n",
"step-3": "<mask token>\nif not '_UL' in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelif sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\nelse:\n from samples.samplesUL import *\n Debug = True\nsample = sample_dict[sys.argv[1]]\npart_idx = sys.argv[2]\nfile_list = list(map(str, sys.argv[3].strip('[]').split(',')))\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\nstartTime = datetime.datetime.now()\nprint('Starting running at ' + str(startTime))\nROOT.gROOT.SetBatch()\nleadingjet_ptcut = 150.0\nchain = ROOT.TChain('Events')\nfor infile in file_list:\n chain.Add(infile)\nprint('Number of events in chain ' + str(chain.GetEntries()))\ntree = InputTree(chain)\nprint('Number of entries: ' + str(tree.GetEntries()))\nisMC = True\nif 'Data' in sample.name:\n isMC = False\nIsDim8 = False\nif 'aQGC' in sample.name:\n IsDim8 = True\ndataEle = False\ndataMu = False\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\nusername = str(os.environ.get('USER'))\ninituser = str(os.environ.get('USER')[0])\noutTreeFile = ROOT.TFile(sample.label + '_part' + str(part_idx) + '.root',\n 'RECREATE')\nptNBins = 100\nptMin = 0\nptMax = 1000.0\netaNBins = 60\netaMin = -3.0\netaMax = 3.0\nptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])\netabins = 
array.array('d', [0.0, 0.8, 1.6, 2.4])\nnptbins = len(ptbins) - 1\nnetabins = len(etabins) - 1\nh2_BTaggingEff_Denom_b = ROOT.TH2D('h2_BTaggingEff_Denom_b',\n 'MC bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_c = ROOT.TH2D('h2_BTaggingEff_Denom_c',\n 'MC cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_udsg = ROOT.TH2D('h2_BTaggingEff_Denom_udsg',\n 'MC ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_b = ROOT.TH2D('h2_BTaggingEff_Num_b',\n 'Tagged bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_c = ROOT.TH2D('h2_BTaggingEff_Num_c',\n 'Tagged cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_udsg = ROOT.TH2D('h2_BTaggingEff_Num_udsg',\n 'Tagged ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\nfor i in range(tree.GetEntries()):\n if Debug:\n if i > 100:\n break\n if not Debug and i % 5000 == 0:\n print('Event #', i + 1, ' out of ', tree.GetEntries())\n event = Event(tree, i)\n electrons = Collection(event, 'Electron')\n muons = Collection(event, 'Muon')\n jets = Collection(event, 'Jet')\n njets = len(jets)\n fatjets = Collection(event, 'FatJet')\n HLT = Object(event, 'HLT')\n PV = Object(event, 'PV')\n Flag = Object(event, 'Flag')\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n year = sample.year\n if isMC:\n runPeriod = ''\n else:\n runPeriod = sample.runP\n if not isMC:\n if not Flag.eeBadScFilter:\n continue\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,\n Flag)\n if 
noTrigger:\n continue\n \"\"\"\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu 
= False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n \"\"\"\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n if len(goodJets) < 2 or len(fatjets) < 2:\n continue\n for jet in goodJets:\n if abs(jet.partonFlavour) == 5:\n h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif abs(jet.partonFlavour) == 4:\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\nh2_Eff_b = ROOT.TEfficiency('h2_BTaggingEff_b',\n 'bjet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, '')\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, '')\nh2_Eff_c = ROOT.TEfficiency('h2_BTaggingEff_c',\n 'cjet efficiency;p_{T} [GeV];#eta', nptbins, 
ptbins, netabins, etabins)\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, '')\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, '')\nh2_Eff_udsg = ROOT.TEfficiency('h2_BTaggingEff_udsg',\n 'light jet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins\n )\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, '')\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, '')\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\nendTime = datetime.datetime.now()\nprint('Ending running at ' + str(endTime))\n",
"step-4": "import os\nimport sys\nimport ROOT\nimport math\nimport datetime\nimport copy\nfrom array import array\nfrom skimtree_utils_ssWW_wFakes_old import *\nif not '_UL' in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelif sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\nelse:\n from samples.samplesUL import *\n Debug = True\nsample = sample_dict[sys.argv[1]]\npart_idx = sys.argv[2]\nfile_list = list(map(str, sys.argv[3].strip('[]').split(',')))\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\nstartTime = datetime.datetime.now()\nprint('Starting running at ' + str(startTime))\nROOT.gROOT.SetBatch()\nleadingjet_ptcut = 150.0\nchain = ROOT.TChain('Events')\nfor infile in file_list:\n chain.Add(infile)\nprint('Number of events in chain ' + str(chain.GetEntries()))\ntree = InputTree(chain)\nprint('Number of entries: ' + str(tree.GetEntries()))\nisMC = True\nif 'Data' in sample.name:\n isMC = False\nIsDim8 = False\nif 'aQGC' in sample.name:\n IsDim8 = True\ndataEle = False\ndataMu = False\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\nusername = str(os.environ.get('USER'))\ninituser = str(os.environ.get('USER')[0])\noutTreeFile = ROOT.TFile(sample.label + '_part' + str(part_idx) + '.root',\n 'RECREATE')\nptNBins = 100\nptMin = 0\nptMax = 
1000.0\netaNBins = 60\netaMin = -3.0\netaMax = 3.0\nptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])\netabins = array.array('d', [0.0, 0.8, 1.6, 2.4])\nnptbins = len(ptbins) - 1\nnetabins = len(etabins) - 1\nh2_BTaggingEff_Denom_b = ROOT.TH2D('h2_BTaggingEff_Denom_b',\n 'MC bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_c = ROOT.TH2D('h2_BTaggingEff_Denom_c',\n 'MC cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_udsg = ROOT.TH2D('h2_BTaggingEff_Denom_udsg',\n 'MC ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_b = ROOT.TH2D('h2_BTaggingEff_Num_b',\n 'Tagged bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_c = ROOT.TH2D('h2_BTaggingEff_Num_c',\n 'Tagged cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_udsg = ROOT.TH2D('h2_BTaggingEff_Num_udsg',\n 'Tagged ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\nfor i in range(tree.GetEntries()):\n if Debug:\n if i > 100:\n break\n if not Debug and i % 5000 == 0:\n print('Event #', i + 1, ' out of ', tree.GetEntries())\n event = Event(tree, i)\n electrons = Collection(event, 'Electron')\n muons = Collection(event, 'Muon')\n jets = Collection(event, 'Jet')\n njets = len(jets)\n fatjets = Collection(event, 'FatJet')\n HLT = Object(event, 'HLT')\n PV = Object(event, 'PV')\n Flag = Object(event, 'Flag')\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n year = sample.year\n if isMC:\n runPeriod = ''\n else:\n runPeriod = sample.runP\n if not isMC:\n if 
not Flag.eeBadScFilter:\n continue\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,\n Flag)\n if noTrigger:\n continue\n \"\"\"\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if 
GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n \"\"\"\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n if len(goodJets) < 2 or len(fatjets) < 2:\n continue\n for jet in goodJets:\n if abs(jet.partonFlavour) == 5:\n h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif abs(jet.partonFlavour) == 4:\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\nh2_Eff_b = ROOT.TEfficiency('h2_BTaggingEff_b',\n 'bjet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, 
'')\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, '')\nh2_Eff_c = ROOT.TEfficiency('h2_BTaggingEff_c',\n 'cjet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, '')\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, '')\nh2_Eff_udsg = ROOT.TEfficiency('h2_BTaggingEff_udsg',\n 'light jet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins\n )\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, '')\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, '')\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\nendTime = datetime.datetime.now()\nprint('Ending running at ' + str(endTime))\n",
"step-5": "#!/bin/env python3\nimport os\n##print(os.environ)\n##print(\"**********************************************************************\")\n##print(\"**********************************************************************\")\n##print(\"**********************************************************************\")\n##print(str(os.environ.get('PYTHONPATH')))\n##print(str(os.environ.get('PYTHON3PATH')))\nimport sys\n##print(\"*************** This is system version info ***************************\")\n##print(sys.version_info)\n#import platform\n##print(\"*************** This is python version info ***************************\")\n##print(platform.python_version())\nimport ROOT\n##print(\"Succesfully imported ROOT\")\nimport math\nimport datetime\nimport copy\nfrom array import array\nfrom skimtree_utils_ssWW_wFakes_old import *\n\nif not \"_UL\" in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelse:\n if sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\n else:\n from samples.samplesUL import *\n Debug = True\n\nsample = sample_dict[sys.argv[1]]\npart_idx = sys.argv[2]\nfile_list = list(map(str, sys.argv[3].strip('[]').split(',')))\n#print(\"file_list: \", file_list, \"\\nloop #1 over it\")\n#for infile in file_list:\n #print(infile)\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n\n idxx = 0.\n idxy = nybins + 1 \n for ix in range(nxbins):\n idxx = ix + 1 \n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n\n idxx = nxbins + 1 \n idxy = 0. 
\n for iy in range(nybins):\n idxy = iy + 1 \n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\nstartTime = datetime.datetime.now()\nprint(\"Starting running at \" + str(startTime))\n\nROOT.gROOT.SetBatch()\n\nleadingjet_ptcut = 150.\n\nchain = ROOT.TChain('Events')\n#print(chain)\n#print(\"loop #2 over file_list\")\nfor infile in file_list: \n #print(\"Adding %s to the chain\" %(infile))\n chain.Add(infile)\n\nprint(\"Number of events in chain \" + str(chain.GetEntries()))\n#print(\"Number of events in tree from chain \" + str((chain.GetTree()).GetEntries()))\n#print(\"Type of tree from chain \" + str(type(chain.GetTree())))\n#treechain = (ROOT.TTree)(chain.GetTree())\ntree = InputTree(chain)\nprint(\"Number of entries: \" +str(tree.GetEntries()))\n#print(\"tree: \", tree)\n\nisMC = True\nif ('Data' in sample.name):\n isMC = False\n\n#MCReco = MCReco * isMC\n\nIsDim8 = False\nif 'aQGC' in sample.name:\n IsDim8 = True\n\ndataEle = False\ndataMu = False\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\n\nusername = str(os.environ.get('USER'))\ninituser = str(os.environ.get('USER')[0])\n#folder = 'vbtag'\n#if not os.path.exists(\"/eos/user/\" + inituser + \"/\" + username + \"/VBS/nosynch/\" + folder + \"/\" + sample.label):\n #os.makedirs(\"/eos/user/\" + inituser + \"/\" + username +\"/VBS/nosynch/\" + folder + \"/\" + sample.label)\n#outpath = \"/eos/user/\" + inituser + \"/\" + username +\"/VBS/nosynch/\" + folder + \"/\" + sample.label + \"/\"\n#++++++++++++++++++++++++++++++++++\n#++ branching the new trees ++\n#++++++++++++++++++++++++++++++++++\n#print(outpath + sample.label+\"_part\"+str(part_idx)+\".root\")\noutTreeFile = ROOT.TFile(sample.label+\"_part\"+str(part_idx)+\".root\", \"RECREATE\") #some name of the output file\n\n#++++++++++++++++++++++++++++++++++\n#++ All 
category ++\n#++++++++++++++++++++++++++++++++++\n\n#++++++++++++++++++++++++++++++++++\n#++ Efficiency studies ++\n#++++++++++++++++++++++++++++++++++\nptNBins = 100\nptMin = 0\nptMax = 1000.\netaNBins = 60\netaMin = -3.\netaMax = 3.\nptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])\netabins = array.array('d', [0.0, 0.8, 1.6, 2.4])\nnptbins = len(ptbins)-1\nnetabins = len(etabins)-1\nh2_BTaggingEff_Denom_b = ROOT.TH2D(\"h2_BTaggingEff_Denom_b\", \"MC bjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_c = ROOT.TH2D(\"h2_BTaggingEff_Denom_c\", \"MC cjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_udsg = ROOT.TH2D(\"h2_BTaggingEff_Denom_udsg\", \"MC ljet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_b = ROOT.TH2D(\"h2_BTaggingEff_Num_b\", \"Tagged bjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_c = ROOT.TH2D(\"h2_BTaggingEff_Num_c\", \"Tagged cjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_udsg = ROOT.TH2D(\"h2_BTaggingEff_Num_udsg\", \"Tagged ljet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\n\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\n\n#++++++++++++++++++++++++++++++++++\n#++ looping over the events ++\n#++++++++++++++++++++++++++++++++++\nfor i in range(tree.GetEntries()):\n #++++++++++++++++++++++++++++++++++\n #++ taking objects ++\n #++++++++++++++++++++++++++++++++++\n if Debug:\n if i > 100:\n break\n if not Debug and i%5000 == 0:\n print(\"Event #\", i+1, \" out of \", tree.GetEntries())\n event = Event(tree,i)\n electrons = Collection(event, \"Electron\")\n muons = Collection(event, \"Muon\")\n jets = Collection(event, \"Jet\")\n njets = len(jets)\n fatjets = Collection(event, \"FatJet\")\n HLT = Object(event, \"HLT\")\n 
PV = Object(event, \"PV\")\n Flag = Object(event, 'Flag')\n\n #++++++++++++++++++++++++++++++++++\n #++ defining variables ++\n #++++++++++++++++++++++++++++++++++\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n #++++++++++++++++++++++++++++++++++\n #++ starting the analysis ++\n #++++++++++++++++++++++++++++++++++\n #VetoMu = get_LooseMu(muons)\n #goodMu = get_Mu(muons)\n #VetoEle = get_LooseEle(electrons)\n #goodEle = get_Ele(electrons)\n year = sample.year\n if(isMC):\n runPeriod = ''\n else:\n runPeriod = sample.runP\n\n if not isMC:\n if not Flag.eeBadScFilter:\n continue\n\n #print \"------ \", i\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod, Flag)\n\n if noTrigger:\n continue\n\n '''\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n 
else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n '''\n\n ######################################\n ## Selecting only jets with pt>30 ##\n ######################################\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n\n if (len(goodJets) < 2 or len(fatjets) < 2):\n continue\n\n for jet in goodJets:\n if(abs(jet.partonFlavour) == 5):\n 
h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif(abs(jet.partonFlavour) == 4):\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\n\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\n\nh2_Eff_b = ROOT.TEfficiency(\"h2_BTaggingEff_b\", \"bjet efficiency;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, \"\")\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, \"\")\n\nh2_Eff_c = ROOT.TEfficiency(\"h2_BTaggingEff_c\", \"cjet efficiency;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, \"\")\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, \"\")\n\nh2_Eff_udsg = ROOT.TEfficiency(\"h2_BTaggingEff_udsg\", \"light jet efficiency;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, \"\")\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, \"\")\n\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\n\nendTime = datetime.datetime.now()\nprint(\"Ending running at \" + str(endTime))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('x is {}'.format(x))
print(type(x))
<|reserved_special_token_0|>
print('x is {}'.format(x))
print(type(x))
<|reserved_special_token_1|>
x = 7
x = 7 // 3
<|reserved_special_token_0|>
x = 0.1 + 0.1 + 0.1 - 0.3
print('x is {}'.format(x))
print(type(x))
<|reserved_special_token_0|>
a = Decimal('.10')
b = Decimal('.30')
x = a + a + a - b
print('x is {}'.format(x))
print(type(x))
<|reserved_special_token_1|>
x = 7
x = 7 // 3
from decimal import *
x = 0.1 + 0.1 + 0.1 - 0.3
print('x is {}'.format(x))
print(type(x))
from decimal import *
a = Decimal('.10')
b = Decimal('.30')
x = a + a + a - b
print('x is {}'.format(x))
print(type(x))
<|reserved_special_token_1|>
# =============>This is a Normal mathematical tasks<==========
x = 7
x = 7 // 3 # rounds the number = 2 ans class int
#x = 7 / 3 # gives the floating number = 2.33333335 ans class float
#x = 7 % 3 # gives the reminder = 1 ans class int
#print("x is {}" .format(x))
#print(type(x))
# ================>This is how to add decimal accuracy vs procession<================
# x = .1 + .1 + .1 -.3 the answer is 5.551115123125783 because python doe not understand accuracy and precision to overcome do the import * from decimal
from decimal import *
x = .1 + .1 + .1 -.3
print("x is {}" .format(x))
print(type(x))
# =============>How to solve the above problem accuracy<===============
# And the type is class decimal.Decimal
# When dealing with money use this method
from decimal import *
a = Decimal('.10') # it will conver from string
b = Decimal('.30')
x = a + a + a - b
print("x is {}" .format(x))
print(type(x))
|
flexible
|
{
"blob_id": "62a7958ba5ebb6da866d6ef156e52136df22f235",
"index": 107,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('x is {}'.format(x))\nprint(type(x))\n<mask token>\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-3": "x = 7\nx = 7 // 3\n<mask token>\nx = 0.1 + 0.1 + 0.1 - 0.3\nprint('x is {}'.format(x))\nprint(type(x))\n<mask token>\na = Decimal('.10')\nb = Decimal('.30')\nx = a + a + a - b\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-4": "x = 7\nx = 7 // 3\nfrom decimal import *\nx = 0.1 + 0.1 + 0.1 - 0.3\nprint('x is {}'.format(x))\nprint(type(x))\nfrom decimal import *\na = Decimal('.10')\nb = Decimal('.30')\nx = a + a + a - b\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-5": "\n# =============>This is a Normal mathematical tasks<==========\nx = 7\nx = 7 // 3 # rounds the number = 2 ans class int\n#x = 7 / 3 # gives the floating number = 2.33333335 ans class float\n#x = 7 % 3 # gives the reminder = 1 ans class int\n\n#print(\"x is {}\" .format(x))\n#print(type(x))\n# ================>This is how to add decimal accuracy vs procession<================\n# x = .1 + .1 + .1 -.3 the answer is 5.551115123125783 because python doe not understand accuracy and precision to overcome do the import * from decimal\nfrom decimal import *\nx = .1 + .1 + .1 -.3\nprint(\"x is {}\" .format(x))\nprint(type(x))\n# =============>How to solve the above problem accuracy<===============\n# And the type is class decimal.Decimal\n# When dealing with money use this method\nfrom decimal import *\na = Decimal('.10') # it will conver from string\nb = Decimal('.30')\nx = a + a + a - b\nprint(\"x is {}\" .format(x))\nprint(type(x))\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Classifier(nn.Module):
def __init__(self, args, prob=0.5):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(48 * 4 * 4, 100)
self.bn1_fc = nn.BatchNorm1d(100)
self.fc2 = nn.Linear(100, 100)
self.bn2_fc = nn.BatchNorm1d(100)
self.fc3 = nn.Linear(100, 10)
self.bn_fc3 = nn.BatchNorm1d(10)
self.prob = prob
self.use_drop = args.use_drop
self.use_bn = args.use_bn
self.use_gumbel = args.use_gumbel
<|reserved_special_token_0|>
class Generator(nn.Module):
def __init__(self, nz=100):
super(Generator, self).__init__()
self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,
bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.
ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(
256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias
=False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d
(128, 1, 4, 2, 1, bias=False), nn.Tanh())
def forward(self, x):
x = self.network(x)
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Encoder(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Classifier(nn.Module):
def __init__(self, args, prob=0.5):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(48 * 4 * 4, 100)
self.bn1_fc = nn.BatchNorm1d(100)
self.fc2 = nn.Linear(100, 100)
self.bn2_fc = nn.BatchNorm1d(100)
self.fc3 = nn.Linear(100, 10)
self.bn_fc3 = nn.BatchNorm1d(10)
self.prob = prob
self.use_drop = args.use_drop
self.use_bn = args.use_bn
self.use_gumbel = args.use_gumbel
def forward(self, x):
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn1_fc(self.fc1(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn2_fc(self.fc2(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = self.fc3(x)
return x
class Generator(nn.Module):
def __init__(self, nz=100):
super(Generator, self).__init__()
self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,
bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.
ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(
256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias
=False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d
(128, 1, 4, 2, 1, bias=False), nn.Tanh())
def forward(self, x):
x = self.network(x)
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)
self.bn2 = nn.BatchNorm2d(48)
<|reserved_special_token_0|>
class Classifier(nn.Module):
def __init__(self, args, prob=0.5):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(48 * 4 * 4, 100)
self.bn1_fc = nn.BatchNorm1d(100)
self.fc2 = nn.Linear(100, 100)
self.bn2_fc = nn.BatchNorm1d(100)
self.fc3 = nn.Linear(100, 10)
self.bn_fc3 = nn.BatchNorm1d(10)
self.prob = prob
self.use_drop = args.use_drop
self.use_bn = args.use_bn
self.use_gumbel = args.use_gumbel
def forward(self, x):
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn1_fc(self.fc1(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn2_fc(self.fc2(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = self.fc3(x)
return x
class Generator(nn.Module):
def __init__(self, nz=100):
super(Generator, self).__init__()
self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,
bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.
ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(
256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias
=False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d
(128, 1, 4, 2, 1, bias=False), nn.Tanh())
def forward(self, x):
x = self.network(x)
return x
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)
self.bn2 = nn.BatchNorm2d(48)
def forward(self, x):
x = torch.mean(x, 1).view(x.size()[0], 1, x.size()[2], x.size()[3])
x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), stride=2,
kernel_size=2, dilation=(1, 1))
x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), stride=2,
kernel_size=2, dilation=(1, 1))
x = x.view(x.size(0), 48 * 4 * 4)
return x
class Classifier(nn.Module):
def __init__(self, args, prob=0.5):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(48 * 4 * 4, 100)
self.bn1_fc = nn.BatchNorm1d(100)
self.fc2 = nn.Linear(100, 100)
self.bn2_fc = nn.BatchNorm1d(100)
self.fc3 = nn.Linear(100, 10)
self.bn_fc3 = nn.BatchNorm1d(10)
self.prob = prob
self.use_drop = args.use_drop
self.use_bn = args.use_bn
self.use_gumbel = args.use_gumbel
def forward(self, x):
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn1_fc(self.fc1(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn2_fc(self.fc2(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = self.fc3(x)
return x
class Generator(nn.Module):
def __init__(self, nz=100):
super(Generator, self).__init__()
self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,
bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.
ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(
256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias
=False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d
(128, 1, 4, 2, 1, bias=False), nn.Tanh())
def forward(self, x):
x = self.network(x)
return x
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)
self.bn2 = nn.BatchNorm2d(48)
def forward(self, x):
x = torch.mean(x, 1).view(x.size()[0], 1, x.size()[2], x.size()[3])
x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), stride=2, kernel_size=2, dilation=(1, 1))
x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), stride=2, kernel_size=2, dilation=(1, 1))
#print(x.size())
x = x.view(x.size(0), 48*4*4)
return x
class Classifier(nn.Module):
def __init__(self, args, prob=0.5):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(48*4*4, 100)
self.bn1_fc = nn.BatchNorm1d(100)
self.fc2 = nn.Linear(100, 100)
self.bn2_fc = nn.BatchNorm1d(100)
self.fc3 = nn.Linear(100, 10)
self.bn_fc3 = nn.BatchNorm1d(10)
self.prob = prob
self.use_drop = args.use_drop
self.use_bn = args.use_bn
self.use_gumbel = args.use_gumbel
def forward(self, x):
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn1_fc(self.fc1(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = F.relu(self.bn2_fc(self.fc2(x)))
x = F.dropout(x, training=self.training, p=self.prob)
x = self.fc3(x)
return x
class Generator(nn.Module):
    """DCGAN-style generator: latent (nz x 1 x 1) -> single-channel 28x28 image in [-1, 1]."""

    def __init__(self, nz=100):
        super(Generator, self).__init__()
        layers = []
        # (in_channels, out_channels, kernel, stride, padding) for each
        # transposed-conv upsampling stage: 1x1 -> 4x4 -> 7x7 -> 14x14.
        for in_ch, out_ch, kernel, stride, pad in ((nz, 512, 4, 1, 0),
                                                   (512, 256, 3, 2, 1),
                                                   (256, 128, 4, 2, 1)):
            layers.append(nn.ConvTranspose2d(in_ch, out_ch, kernel, stride, pad, bias=False))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(True))
        # Final stage: 14x14 -> 28x28, squashed to [-1, 1] by Tanh.
        layers.append(nn.ConvTranspose2d(128, 1, 4, 2, 1, bias=False))
        layers.append(nn.Tanh())
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        # x: (batch, nz, 1, 1) -> (batch, 1, 28, 28)
        return self.network(x)
|
flexible
|
{
"blob_id": "9140da0b6c04f39a987a177d56321c56c01586e8",
"index": 3739,
"step-1": "<mask token>\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n <mask token>\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-2": "<mask token>\n\n\nclass Encoder(nn.Module):\n <mask token>\n <mask token>\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-3": "<mask token>\n\n\nclass Encoder(nn.Module):\n\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)\n self.bn2 = nn.BatchNorm2d(48)\n <mask token>\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)\n self.bn2 = nn.BatchNorm2d(48)\n\n def forward(self, x):\n x = torch.mean(x, 1).view(x.size()[0], 1, x.size()[2], x.size()[3])\n x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), stride=2,\n kernel_size=2, dilation=(1, 1))\n x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), stride=2,\n kernel_size=2, dilation=(1, 1))\n x = x.view(x.size(0), 48 * 4 * 4)\n return x\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)\n self.bn2 = nn.BatchNorm2d(48)\n\n def forward(self, x):\n x = torch.mean(x, 1).view(x.size()[0], 1, x.size()[2], x.size()[3])\n x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), stride=2, kernel_size=2, dilation=(1, 1))\n x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), stride=2, kernel_size=2, dilation=(1, 1))\n #print(x.size())\n x = x.view(x.size(0), 48*4*4)\n return x\n\n\nclass Classifier(nn.Module):\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48*4*4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d(nz, 512, 4, 1, 0, bias=False),\n nn.BatchNorm2d(512),\n nn.ReLU(True),\n\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(512, 256, 3, 2, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n\n # state size. (ngf*2) x 8 x 8\n nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n\n # state size. 
(ngf) x 16 x 16\n nn.ConvTranspose2d(128, 1, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 32 x 32\n )\n\n def forward(self, x):\n # print(x.shape) # torch.Size([64, 100, 1, 1])\n x = self.network(x)\n # print(x.shape) # torch.Size([64, 1, 28, 28])\n\n return x\n\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
from django.contrib.auth.models import User
from django.core import validators
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import Group
from django.conf import settings
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def assign_group(sender, instance, created, **kwargs):
    """Post-save signal: add every newly created user to the 'editors' group."""
    if not created:
        return
    editors = Group.objects.get(name='editors')
    instance.groups.add(editors)
class Employee(models.Model):
    """Staff member profile displayed on the site."""
    name = models.CharField("Имя", max_length=100)
    # Age is restricted to a plausible working range of 18-120 by validators.
    age = models.PositiveSmallIntegerField("Возраст", validators=[validators.MaxValueValidator(120),
                                                                  validators.MinValueValidator(18)])
    position = models.CharField("Должность", max_length=60)
    photo = models.ImageField("Фото", upload_to="employees/")
    # Free-text biography: education, experience, qualifications, achievements.
    achievements = models.TextField("Достижения", max_length=2000,
                                    help_text="Информация об образовании, опыте, квалификации и профессиональных достижениях")

    def __str__(self):
        # Human-readable representation (used e.g. in the Django admin).
        return self.name

    class Meta:
        verbose_name = "Сотрудник"
        verbose_name_plural = "Сотрудники"
class Category(models.Model):
    """Service category; ``url`` is the slug used to address the category."""
    name = models.CharField("Категория", max_length=150)
    url = models.SlugField(max_length=160, unique=True)

    def __str__(self):
        # Human-readable representation (used e.g. in the Django admin).
        return self.name

    class Meta:
        verbose_name = "Категория"
        verbose_name_plural = "Категории"
class Service(models.Model):
    """A service offered by the company, linked to employees and a category."""
    # Warranty choices: the label is the duration in months ('' = no warranty).
    PERIOD = (
        (0, ''),
        (1, '6'),
        (2, '12'),
        (3, '24'),
    )
    title = models.CharField("Название", max_length=100)
    description = models.TextField("Описание")
    image = models.ImageField("Фото", upload_to="services/", null=True, blank=True)
    # Employees who perform this service (many-to-many).
    employee = models.ManyToManyField(Employee, verbose_name="Cотрудник", related_name="service_employee")
    # Category link survives category deletion by being set to NULL.
    category = models.ForeignKey(Category, verbose_name="Категория", on_delete=models.SET_NULL, null=True)
    warranty = models.PositiveSmallIntegerField("Гарантийный срок", choices=PERIOD, help_text="Указать в месяцах")
    # Price in rubles; must be non-negative.
    price = models.DecimalField("Стоимость услуги", max_digits=9, decimal_places=2, default=0,
                                help_text="Указывать сумму в рублях", validators=[validators.MinValueValidator(0)])
    url = models.SlugField(max_length=130, unique=True)

    def __str__(self):
        # Human-readable representation (used e.g. in the Django admin).
        return self.title

    class Meta:
        verbose_name = "Услуга"
        verbose_name_plural = "Услуги"
|
normal
|
{
"blob_id": "a139042d0c6fa4941b7149a33b0a48018e9f511b",
"index": 9003,
"step-1": "<mask token>\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-2": "<mask token>\n\n\nclass Employee(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = 'Сотрудник'\n verbose_name_plural = 'Сотрудники'\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-3": "<mask token>\n\n\nclass Employee(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Сотрудник'\n verbose_name_plural = 'Сотрудники'\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-4": "from django.contrib.auth.models import User\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import Group\nfrom django.conf import settings\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef assign_group(sender, instance, created, **kwargs):\n \"\"\"Сигнал, добавляющий созданного пользователя в группу editors\"\"\"\n if created:\n editors_group = Group.objects.get(name='editors')\n instance.groups.add(editors_group)\n\n\nclass Employee(models.Model):\n \"\"\"Сотрудники\"\"\"\n name = models.CharField('Имя', max_length=100)\n age = models.PositiveSmallIntegerField('Возраст', validators=[\n validators.MaxValueValidator(120), validators.MinValueValidator(18)])\n position = models.CharField('Должность', max_length=60)\n photo = models.ImageField('Фото', upload_to='employees/')\n achievements = models.TextField('Достижения', max_length=2000,\n help_text=\n 'Информация об образовании, опыте, квалификации и профессиональных достижениях'\n )\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Сотрудник'\n verbose_name_plural = 'Сотрудники'\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n 
on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-5": "from django.contrib.auth.models import User\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import Group\n\nfrom django.conf import settings\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef assign_group(sender, instance, created, **kwargs):\n \"\"\"Сигнал, добавляющий созданного пользователя в группу editors\"\"\"\n\n if created:\n editors_group = Group.objects.get(name='editors')\n instance.groups.add(editors_group)\n\n\nclass Employee(models.Model):\n \"\"\"Сотрудники\"\"\"\n\n name = models.CharField(\"Имя\", max_length=100)\n age = models.PositiveSmallIntegerField(\"Возраст\", validators=[validators.MaxValueValidator(120),\n validators.MinValueValidator(18)])\n position = models.CharField(\"Должность\", max_length=60)\n photo = models.ImageField(\"Фото\", upload_to=\"employees/\")\n achievements = models.TextField(\"Достижения\", max_length=2000,\n help_text=\"Информация об образовании, опыте, квалификации и профессиональных достижениях\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Сотрудник\"\n verbose_name_plural = \"Сотрудники\"\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n\n name = models.CharField(\"Категория\", max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Категория\"\n verbose_name_plural = \"Категории\"\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n\n PERIOD = (\n (0, ''),\n (1, '6'),\n (2, '12'),\n (3, '24'),\n )\n\n title = models.CharField(\"Название\", max_length=100)\n description = models.TextField(\"Описание\")\n image = models.ImageField(\"Фото\", upload_to=\"services/\", null=True, blank=True)\n employee = models.ManyToManyField(Employee, verbose_name=\"Cотрудник\", related_name=\"service_employee\")\n category = 
models.ForeignKey(Category, verbose_name=\"Категория\", on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField(\"Гарантийный срок\", choices=PERIOD, help_text=\"Указать в месяцах\")\n price = models.DecimalField(\"Стоимость услуги\", max_digits=9, decimal_places=2, default=0,\n help_text=\"Указывать сумму в рублях\", validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = \"Услуга\"\n verbose_name_plural = \"Услуги\"\n",
"step-ids": [
8,
9,
10,
14,
15
]
}
|
[
8,
9,
10,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .game_action_manager import GameActionManager
from .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager
<|reserved_special_token_1|>
# -*- coding:Utf-8 -*-
from .game_action_manager import GameActionManager
from .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager
|
flexible
|
{
"blob_id": "48294209d51fbe4dfb2a5130311a10c8a1dd027c",
"index": 9237,
"step-1": "<mask token>\n",
"step-2": "from .game_action_manager import GameActionManager\nfrom .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager\n",
"step-3": "# -*- coding:Utf-8 -*-\n\n\nfrom .game_action_manager import GameActionManager\nfrom .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
seed_everything(12345)
<|reserved_special_token_0|>
if torch.cuda.is_available():
classifier = classifier.cuda()
trainer.fit(classifier, dm)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
seed_everything(12345)
dm = ChestDataModule(['chexpert_14'], batch_size=32, num_workers=2,
balanced=False)
dm.train_transforms = ChestTrainTransforms(height=224)
dm.val_transforms = ChestValTransforms(height=224)
classifier = BaseLineClassifier(get_model('resnet18', pretrained=True),
num_classes=14, linear=False, learning_rate=1e-05, b1=0.9, b2=0.999,
weight_decay=0.0001, multi_class=True, mixup=False, ct_reg=False)
wandb_logger = WandbLogger(name='baseline-NL-chexpert_14-full-Adam-1e_5',
project='thesis')
checkpoint_callback = ModelCheckpoint(monitor='val_loss', dirpath=
'logs/baseline/chexpert_14/', filename=
'NL-full-Adam-1e_5-{epoch:02d}-{val_loss:.4f}')
trainer = pl.Trainer(gpus=1, deterministic=True, logger=wandb_logger,
callbacks=[checkpoint_callback], max_epochs=20, num_sanity_val_steps=10)
if torch.cuda.is_available():
classifier = classifier.cuda()
trainer.fit(classifier, dm)
<|reserved_special_token_1|>
from data_loaders.data_module import ChestDataModule
from utils.visualisation import showInRow
from models import get_model
from transforms.finetuning import ChestTrainTransforms, ChestValTransforms
from models.baseline import BaseLineClassifier
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
seed_everything(12345)
dm = ChestDataModule(['chexpert_14'], batch_size=32, num_workers=2,
balanced=False)
dm.train_transforms = ChestTrainTransforms(height=224)
dm.val_transforms = ChestValTransforms(height=224)
classifier = BaseLineClassifier(get_model('resnet18', pretrained=True),
num_classes=14, linear=False, learning_rate=1e-05, b1=0.9, b2=0.999,
weight_decay=0.0001, multi_class=True, mixup=False, ct_reg=False)
wandb_logger = WandbLogger(name='baseline-NL-chexpert_14-full-Adam-1e_5',
project='thesis')
checkpoint_callback = ModelCheckpoint(monitor='val_loss', dirpath=
'logs/baseline/chexpert_14/', filename=
'NL-full-Adam-1e_5-{epoch:02d}-{val_loss:.4f}')
trainer = pl.Trainer(gpus=1, deterministic=True, logger=wandb_logger,
callbacks=[checkpoint_callback], max_epochs=20, num_sanity_val_steps=10)
if torch.cuda.is_available():
classifier = classifier.cuda()
trainer.fit(classifier, dm)
<|reserved_special_token_1|>
# Fine-tune a ResNet-18 baseline classifier on the CheXpert-14 dataset
# using PyTorch Lightning, logging to Weights & Biases.
from data_loaders.data_module import ChestDataModule
from utils.visualisation import showInRow
from models import get_model
from transforms.finetuning import ChestTrainTransforms, ChestValTransforms
from models.baseline import BaseLineClassifier
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
# Fix all RNG seeds for reproducible runs.
seed_everything(12345)
# Data: CheXpert-14, 224x224 crops, no class balancing.
dm = ChestDataModule(["chexpert_14"], batch_size=32, num_workers=2, balanced=False)
dm.train_transforms = ChestTrainTransforms(height=224)
dm.val_transforms = ChestValTransforms(height=224)
# Model: pretrained ResNet-18 backbone, full fine-tuning (linear=False),
# multi-label (14 classes), Adam lr=1e-5, no mixup/contrastive regularization.
classifier = BaseLineClassifier(get_model("resnet18", pretrained=True),
                                num_classes=14,
                                linear=False,
                                learning_rate=1e-5,
                                b1=0.9,
                                b2=0.999,
                                weight_decay=1e-4,
                                multi_class=True,
                                mixup=False,
                                ct_reg=False)
# Experiment tracking and best-checkpoint selection on validation loss.
wandb_logger = WandbLogger(name='baseline-NL-chexpert_14-full-Adam-1e_5',project='thesis')
checkpoint_callback = ModelCheckpoint(monitor='val_loss',
                                      dirpath='logs/baseline/chexpert_14/',
                                      filename='NL-full-Adam-1e_5-{epoch:02d}-{val_loss:.4f}')
# Single-GPU deterministic training for 20 epochs.
trainer = pl.Trainer(gpus=1, deterministic=True,
                     logger=wandb_logger, callbacks=[checkpoint_callback], max_epochs=20, num_sanity_val_steps=10)
if torch.cuda.is_available():
    classifier = classifier.cuda()
trainer.fit(classifier, dm)
|
flexible
|
{
"blob_id": "05ca7bbc3285a9e37921c0e514a2e31b05abe051",
"index": 6396,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nseed_everything(12345)\n<mask token>\nif torch.cuda.is_available():\n classifier = classifier.cuda()\ntrainer.fit(classifier, dm)\n",
"step-3": "<mask token>\nseed_everything(12345)\ndm = ChestDataModule(['chexpert_14'], batch_size=32, num_workers=2,\n balanced=False)\ndm.train_transforms = ChestTrainTransforms(height=224)\ndm.val_transforms = ChestValTransforms(height=224)\nclassifier = BaseLineClassifier(get_model('resnet18', pretrained=True),\n num_classes=14, linear=False, learning_rate=1e-05, b1=0.9, b2=0.999,\n weight_decay=0.0001, multi_class=True, mixup=False, ct_reg=False)\nwandb_logger = WandbLogger(name='baseline-NL-chexpert_14-full-Adam-1e_5',\n project='thesis')\ncheckpoint_callback = ModelCheckpoint(monitor='val_loss', dirpath=\n 'logs/baseline/chexpert_14/', filename=\n 'NL-full-Adam-1e_5-{epoch:02d}-{val_loss:.4f}')\ntrainer = pl.Trainer(gpus=1, deterministic=True, logger=wandb_logger,\n callbacks=[checkpoint_callback], max_epochs=20, num_sanity_val_steps=10)\nif torch.cuda.is_available():\n classifier = classifier.cuda()\ntrainer.fit(classifier, dm)\n",
"step-4": "from data_loaders.data_module import ChestDataModule\nfrom utils.visualisation import showInRow\nfrom models import get_model\nfrom transforms.finetuning import ChestTrainTransforms, ChestValTransforms\nfrom models.baseline import BaseLineClassifier\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nimport torch\nimport pytorch_lightning as pl\nfrom pytorch_lightning import seed_everything\nseed_everything(12345)\ndm = ChestDataModule(['chexpert_14'], batch_size=32, num_workers=2,\n balanced=False)\ndm.train_transforms = ChestTrainTransforms(height=224)\ndm.val_transforms = ChestValTransforms(height=224)\nclassifier = BaseLineClassifier(get_model('resnet18', pretrained=True),\n num_classes=14, linear=False, learning_rate=1e-05, b1=0.9, b2=0.999,\n weight_decay=0.0001, multi_class=True, mixup=False, ct_reg=False)\nwandb_logger = WandbLogger(name='baseline-NL-chexpert_14-full-Adam-1e_5',\n project='thesis')\ncheckpoint_callback = ModelCheckpoint(monitor='val_loss', dirpath=\n 'logs/baseline/chexpert_14/', filename=\n 'NL-full-Adam-1e_5-{epoch:02d}-{val_loss:.4f}')\ntrainer = pl.Trainer(gpus=1, deterministic=True, logger=wandb_logger,\n callbacks=[checkpoint_callback], max_epochs=20, num_sanity_val_steps=10)\nif torch.cuda.is_available():\n classifier = classifier.cuda()\ntrainer.fit(classifier, dm)\n",
"step-5": "from data_loaders.data_module import ChestDataModule\nfrom utils.visualisation import showInRow\nfrom models import get_model\n\nfrom transforms.finetuning import ChestTrainTransforms, ChestValTransforms\n\nfrom models.baseline import BaseLineClassifier\n\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\nimport torch\nimport pytorch_lightning as pl\nfrom pytorch_lightning import seed_everything\nseed_everything(12345)\n\n\ndm = ChestDataModule([\"chexpert_14\"], batch_size=32, num_workers=2, balanced=False)\ndm.train_transforms = ChestTrainTransforms(height=224)\ndm.val_transforms = ChestValTransforms(height=224)\n\nclassifier = BaseLineClassifier(get_model(\"resnet18\", pretrained=True), \n num_classes=14, \n linear=False,\n learning_rate=1e-5,\n b1=0.9,\n b2=0.999,\n weight_decay=1e-4,\n multi_class=True,\n mixup=False,\n ct_reg=False)\n\n\nwandb_logger = WandbLogger(name='baseline-NL-chexpert_14-full-Adam-1e_5',project='thesis')\ncheckpoint_callback = ModelCheckpoint(monitor='val_loss', \n dirpath='logs/baseline/chexpert_14/', \n filename='NL-full-Adam-1e_5-{epoch:02d}-{val_loss:.4f}')\n\ntrainer = pl.Trainer(gpus=1, deterministic=True,\n logger=wandb_logger, callbacks=[checkpoint_callback], max_epochs=20, num_sanity_val_steps=10)\n\nif torch.cuda.is_available():\n classifier = classifier.cuda()\n\ntrainer.fit(classifier, dm)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from card import Card;
from deck import Deck;
import people;
import chip;
import sys;
import time;
def display_instructions() :
    """Print the blackjack rules and wait for a keypress before the game starts."""
    print('\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 ');
    print('as possible without going over. The numbered cards have the value of their number, face cards have ');
    print('a value of 10 each, and the ace can either be counted as 1 or 11 (player\'s choice)\n');
    print('Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to ');
    print('each player (up to 7 players) and to the dealer. The player\'s cards will be face up while one of the ');
    print('dealer\'s cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n');
    print('    Hit:          when a player \'hits,\' he or she is dealt another card. A player can hit as many ');
    print('                  times as wanted, up until the player busts (goes over 21). \n');
    print('    Stand:        To \'stand\' means to stay with the current cards. \n');
    print('    Split:        A player can \'split\' only when the first two cards of his or her hand are the ');
    print('                  same. When this occurs, the player makes two separate piles, one with each ');
    print('                  identical card, and places a bet identical to the initial bet for the second ');
    print('                  pile. Then, the player can hit or stand with each pile as in a normal round.\n');
    print('    Double Down:  When a player chooses to \'double down\', he or she can increase the current bet ');
    print('                  by 100% in exchange for agreeing to stand after being dealt one more card.\n');
    input('Ready to play? Hit any key to continue: ');
    print();
def get_num_players():
    """Prompt for the number of players (1-7) and return it as an int."""
    reply = input('How many people will be playing (up to 7)? Enter a number: ')
    # Re-prompt until the reply is a whole number inside the allowed range.
    while not (reply.isdigit() and 1 <= int(reply) <= 7):
        reply = input('Please enter a number from 1 to 7: ')
    print('\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n')
    time.sleep(1)
    return int(reply)
def create_players(num):
    """Ask each of the *num* players for a name and return a list of Player objects.

    Every player starts with a bankroll of 1000.
    """
    roster = []
    for idx in range(num):
        entered = input(f'Player {idx+1}, what is your name? ')
        # Keep asking until a non-empty name is given.
        while entered == '':
            entered = input('Please enter your name: ')
        roster.append(people.Player(entered, 1000))
    print('\nAll players will begin the game with the same amount of $1,000 dollars.\n')
    return roster
def deal(dealer, players):
    """Deal one card to every solvent player, then one to the dealer.

    The dealer is expected to be the last entry of *players*.
    """
    *gamblers, house = players
    for gambler in gamblers:
        # Broke players sit the round out.
        if not gambler.check_broke():
            dealer.deal_card(gambler)
    dealer.deal_card(house)
def place_bets(players):
    """Collect a wager from every solvent player.

    The dealer (last element of ``players``) does not bet. Each bet must be
    a whole number no greater than the player's balance; invalid entries are
    re-prompted. The accepted amount is stored on ``player.bet``.

    Fix: removed the dead local ``bets = []`` that was created but never used.
    """
    print('Now, each of you must place your bets.\n')
    for player in players[:-1]:  # the dealer does not place a bet
        if not player.check_broke():
            bet = input(f'Bet for {player.name}: ')
            while not bet.isdigit() or int(bet) > player.money:
                msg = 'Please enter a whole number: '
                if bet.isdigit():
                    # Numeric but over the player's balance.
                    msg = 'You don\'t have enough money! Enter a different value: '
                bet = input(msg)
            player.bet = int(bet)
    print()
def view_hands(players):
    """Display every hand at the table.

    The dealer's second card stays hidden, broke players are skipped, and a
    dealt blackjack is announced immediately.
    """
    print('Here are the hands for each player: \n')
    for person in players:
        if isinstance(person, people.Dealer):
            # Only the dealer's first card is revealed.
            print(f'{person.name}: [{person.hand[0][0]}, ?]', end='')
            print()
        elif not person.check_broke():
            print(f'{person.name}: {person.hand}', end='')
            if person.check_blackjack():
                print(f' ==> BLACKJACK!!! -- {person.name} wins ${person.bet}!')
            else:
                print()
    print()
def do_decision(player, dealer, hand_index=0):
    """Ask the player to pick an action and run it; re-prompt until one
    succeeds.

    Handlers (stand/hit/split/double_down) return False when the chosen
    action is not currently allowed (e.g. splitting without a pair), which
    loops back to the prompt.
    """
    choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}
    valid_choice = False
    while not valid_choice:
        choice = input(f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): ')
        while choice.lower() not in choices_dict.keys():
            choice = input('Please enter either \'s\', \'h\', \'p\', or \'d\', corresponding to your choice: ')
        # Bug fix: look up with the lower-cased key. The validation loop above
        # accepts upper-case input (e.g. 'S'), but the dict only has lower-case
        # keys, so choices_dict.get(choice) returned None and crashed when called.
        valid_choice = choices_dict.get(choice.lower())(player, dealer, hand_index)
def cycle_decisions(players) :
    # Run one full round: each human takes their turn in order, then the
    # dealer (last in the list) plays by house rules and all bets are settled.
    dealer = players[-1];
    for p in players :
        if isinstance(p, people.Dealer) :
            # Dealer's turn: reveal/hit per fixed rules, then score the round.
            print(f'{p.name} will hit until reaching a hand of at least \'hard\' 17 (without an ace counting for 11).');
            sys.stdout.flush();
            time.sleep(0.8);
            # Dealer hits only if the opening hand isn't already finished
            # (blackjack/bust) and is below hard 17.
            if not check_status(p) and not p.check_hard_17() : hit(p, dealer);
            sys.stdout.flush();
            time.sleep(0.5);
            disp_str_slow('\nEnd-of-Round Earnings: \n', 0.05);
            if p.check_bust() :
                # Dealer busted: every surviving (non-busted) player hand wins its bet.
                for i in players[:-1] :
                    if not i.check_broke() :
                        sys.stdout.flush();
                        time.sleep(0.5);
                        print('    ', end='');
                        for j in range(0,len(i.hand)) : # this is to loop through each hand for a player (player would have multiple hands after splitting)
                            if not i.check_bust(j) :
                                print(f'{i.name} wins ${i.bet}!  ', end='');
                                i.money += i.bet;
                            else :
                                print(f'{i.name} loses ${i.bet}!  ', end='');
                                i.money -= i.bet;
                        # Re-derive the chip breakdown from the new balance.
                        i.chips = chip.convert_to_chips(i.money);
                        if i.check_broke() :
                            print(f'Sorry {i.name}, but you\'re out of money and can no longer play in this game');
                        else :
                            print(f'Current Balance: ${i.money} (Chips: {i.chips})');
            else :
                # Dealer stood: compare each surviving hand's value to the dealer's.
                for i in players[:-1] :
                    if not i.check_broke() :
                        sys.stdout.flush();
                        time.sleep(0.5);
                        print('    ', end='');
                        for j in range(0,len(i.hand)) :
                            if not i.check_bust(j) :
                                if i.hand_value(j) > p.hand_value() :
                                    print(f'{i.name} wins ${i.bet}!  ', end='');
                                    i.money += i.bet;
                                elif i.hand_value(j) < p.hand_value() :
                                    print(f'{i.name} loses ${i.bet}!  ', end='');
                                    i.money -= i.bet;
                                else :
                                    # Push: equal totals, no money changes hands.
                                    print(f'{i.name} tied with the {p.name}! No change.  ', end='');
                            else :
                                # A busted hand loses regardless of the dealer's total.
                                print(f'{i.name} loses ${i.bet}!  ', end='');
                                i.money -= i.bet;
                        i.chips = chip.convert_to_chips(i.money);
                        if i.check_broke() :
                            print(f'Sorry {i.name}, but you\'re out of money and can no longer play in this game');
                        else :
                            print(f'Current Balance: ${i.money} (Chips: {i.chips})');
                        sys.stdout.flush();
                        time.sleep(0.5);
        else :
            # Human player's turn; skip anyone who already has blackjack or is broke.
            if not p.check_blackjack() and not p.check_broke() :
                do_decision(p, dealer);
def stand(player, dealer, hand_index=0):
    """End the player's turn without taking another card.

    Standing is always a legal move, so this always reports success (True).
    """
    announcement = f'{player.name} stands.\n'
    print(announcement)
    return True
def hit(player, dealer, hand_index=0):
    """Deal the player another card, then (for humans) offer further hits.

    The dealer auto-hits until reaching at least hard 17 or finishing the
    hand (blackjack/bust). Humans are asked 'y'/'n' after each card until
    they stop or the hand ends. Always returns True: once chosen, hitting
    is a legal move.

    Fix: the original re-prompt inputs were not lower-cased, so answering
    'Y' or 'N' at a re-prompt looped forever; the shared helper below
    lower-cases every answer.
    """
    dealer.deal_card(player, hand_index)
    done = check_status(player, hand_index)
    if isinstance(player, people.Dealer):
        while not player.check_hard_17() and not done:
            time.sleep(0.5)
            dealer.deal_card(player, hand_index)
            done = check_status(player, hand_index)
    else:
        choice = ''
        if not done:
            choice = _ask_hit_again()
        while choice == 'y' and not done:
            dealer.deal_card(player, hand_index)
            done = check_status(player, hand_index)
            if not done:
                choice = _ask_hit_again()
        if not done:
            print()
    return True


def _ask_hit_again():
    """Prompt until the user answers 'y' or 'n' (case-insensitive); return it lower-cased."""
    choice = input('Do you want to hit again (\'y\' or \'n\')? ').lower()
    while choice != 'y' and choice != 'n':
        choice = input('Enter either \'y\' or \'n\': ').lower()
    return choice
def split(player, dealer, hand_index=0):
    """Split a pair into two hands, then play each one in turn.

    Returns False (so the caller re-prompts) when the hand is not a pair or
    the player cannot cover a second, equal bet; returns True once both
    resulting hands have been played.
    """
    first = player.hand[hand_index][0]
    second = player.hand[hand_index][1]
    if first != second:
        print('You can\'t split on that hand! You need two identical cards to split. Choose again.')
        return False
    if player.bet * 2 > player.money:
        print(f'You don\'t have enough money to split with your current bet (${player.bet} * 2 = ${player.bet*2})! Choose again.')
        return False
    # Replace the single hand with two one-card piles.
    player.hand = [[first], [second]]
    print('Now you will play each hand separately: \n')
    for pile in (0, 1):
        print(f'For Hand #{pile + 1}: ')
        do_decision(player, dealer, pile)
    return True
def double_down(player, dealer, hand_index=0):
    """Double the player's bet in exchange for exactly one more card.

    Returns False (caller re-prompts) when the player cannot afford the
    doubled bet or has already doubled down; otherwise doubles the bet,
    deals a single card, and ends the turn (True).
    """
    if player.bet * 2 > player.money:
        print(f'You don\'t have enough money to do that (${player.bet} * 2 = ${player.bet*2})! Choose again.')
        return False
    elif player.did_double_down:
        print('You can double down only once! Choose a different option.')
        return False
    player.bet *= 2
    player.did_double_down = True
    # Fixed stray '!.': message previously read 'Bet increased to $X!.'
    print(f'Bet increased to ${player.bet}!')
    # Bug fix: per the rules shown to the player ("agreeing to stand after
    # being dealt one more card"), doubling down deals exactly one card and
    # ends the turn -- the old code re-opened the full action menu instead.
    dealer.deal_card(player, hand_index)
    check_status(player, hand_index)
    return True
def check_status(player, hand_index=0):
    """Reveal the hand's newest card with a small animation and report
    whether the hand is finished.

    Returns True when the hand is blackjack or bust (no further action
    possible), False otherwise.
    """
    cards = player.hand[hand_index]
    # All but the last card printed at once; the newest card is animated.
    prefix = ''.join(c.__str__() + ', ' for c in cards[:-1])
    print(f'Current Hand: [{prefix}', end='')
    sys.stdout.flush()
    time.sleep(0.5)
    disp_str_slow(f'{cards[-1].__str__()}]', 0.05)
    time.sleep(0.5)
    finished = False
    if player.check_blackjack(hand_index):
        disp_str_slow(' ==> BLACKJACK!!! ', 0.05)
        if not isinstance(player, people.Dealer):
            # The dealer has no bet to win.
            disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)
        print('\n\n', end='')
        finished = True
        sys.stdout.flush()
        time.sleep(0.5)
    elif player.check_bust(hand_index):
        disp_str_slow(' ==> BUST! ', 0.05)
        if not isinstance(player, people.Dealer):
            disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)
        print('\n\n', end='')
        finished = True
        sys.stdout.flush()
        time.sleep(0.5)
    else:
        print()
    return finished
def play_again(players):
    """Ask whether to start another round; return 'y' or 'n'.

    If every player (including the dealer slot) reports broke, the game
    cannot continue and 'n' is returned without prompting.
    """
    print()
    anyone_solvent = any(not member.check_broke() for member in players)
    if not anyone_solvent:
        print()
        return 'n'
    answer = input('Do you all want to play another round? Enter \'y\' or \'n\': ').lower()
    while answer != 'y' and answer != 'n':
        answer = input('Enter either \'y\' or \'n\': ')
    print()
    return answer
def reset(players):
    """Return every card to the dealer and zero all bets for a new round.

    The dealer is the last element of ``players``.
    """
    dealer = players[-1]
    for member in players:
        dealer.retrieve_cards(member)
        member.bet = 0
def display_accounts(players):
    """Print each player's final balance, chip breakdown, and net gain/loss.

    The dealer (last element of ``players``) is excluded.
    """
    for gambler in players[:-1]:
        delta = gambler.money - gambler.initial_money
        verdict = 'loss' if delta < 0 else 'gain'
        print(f'    {gambler.name}: ${gambler.money} (Chips: {gambler.chips}), net {verdict} of ${abs(delta)}\n')
        sys.stdout.flush()
        time.sleep(0.5)
def disp_str_slow(phrase, t):
    """Print *phrase* one character at a time, pausing *t* seconds between
    characters (typewriter effect)."""
    for ch in phrase:
        print(ch, end='')
        sys.stdout.flush()
        time.sleep(t)
def print_players(players):
    """Debug helper: print each player on its own line."""
    for member in players:
        print(member)
def main():
    """Run the blackjack game.

    Sets up the table (players plus a dealer with a 6-deck shoe), loops
    rounds until the players quit or everyone is broke, then shows final
    account totals.
    """
    display_instructions()
    players = create_players(get_num_players())
    dealer = people.Dealer(Deck(6))
    players.append(dealer)
    replay = 'y'
    while replay == 'y':
        reset(players)
        place_bets(players)
        for _ in range(2):  # two starting cards apiece
            deal(dealer, players)
        view_hands(players)
        cycle_decisions(players)
        replay = play_again(players)
    print('------------------------------------------------------------------------------------------------\n')
    disp_str_slow('FINAL PLAYER ACCOUNTS\n\n', 0.05)
    sys.stdout.flush()
    time.sleep(0.5)
    display_accounts(players)
    sys.stdout.flush()
    time.sleep(0.2)
    print('------------------------------------------------------------------------------------------------\n')
    print('Goodbye!')
# Run the game only when executed as a script (not when imported as a module).
if __name__ == '__main__' :
    main();
|
normal
|
{
"blob_id": "a7050ebd545c4169b481672aed140af610aea997",
"index": 4879,
"step-1": "<mask token>\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\n<mask token>\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! -- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\n<mask token>\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\n<mask token>\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef display_instructions():\n print(\n \"\"\"\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 \"\"\"\n )\n print(\n 'as possible without going over. The numbered cards have the value of their number, face cards have '\n )\n print(\n \"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\\n\"\n )\n print(\n 'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '\n )\n print(\n \"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the \"\n )\n print(\n \"\"\"dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n\"\"\"\n )\n print(\n \" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many \"\n )\n print(\n ' times as wanted, up until the player busts (goes over 21). \\n'\n )\n print(\n \" Stand: To 'stand' means to stay with the current cards. \\n\"\n )\n print(\n \" Split: A player can 'split' only when the first two cards of his or her hand are the \"\n )\n print(\n ' same. When this occurs, the player makes two separate piles, one with each '\n )\n print(\n ' identical card, and places a bet identical to the initial bet for the second '\n )\n print(\n \"\"\" pile. Then, the player can hit or stand with each pile as in a normal round.\n\"\"\"\n )\n print(\n \" Double Down: When a player chooses to 'double down', he or she can increase the current bet \"\n )\n print(\n \"\"\" by 100% in exchange for agreeing to stand after being dealt one more card.\n\"\"\"\n )\n input('Ready to play? Hit any key to continue: ')\n print()\n\n\ndef get_num_players():\n num = input('How many people will be playing (up to 7)? Enter a number: ')\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\n num = input('Please enter a number from 1 to 7: ')\n print(\n \"\"\"\nGreat! 
Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n\"\"\"\n )\n time.sleep(1)\n return int(num)\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\ndef deal(dealer, players):\n for player in players[:-1]:\n if not player.check_broke():\n dealer.deal_card(player)\n dealer.deal_card(players[-1])\n\n\ndef place_bets(players):\n print('Now, each of you must place your bets.\\n')\n bets = []\n for player in players[:-1]:\n if not player.check_broke():\n bet = input(f'Bet for {player.name}: ')\n while not bet.isdigit() or int(bet) > player.money:\n msg = 'Please enter a whole number: '\n if bet.isdigit():\n msg = (\n \"You don't have enough money! Enter a different value: \"\n )\n bet = input(msg)\n player.bet = int(bet)\n print()\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\ndef cycle_decisions(players):\n dealer = players[-1]\n for p in players:\n if isinstance(p, people.Dealer):\n print(\n f\"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11).\"\n )\n sys.stdout.flush()\n time.sleep(0.8)\n if not check_status(p) and not p.check_hard_17():\n hit(p, dealer)\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05)\n if p.check_bust():\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n else:\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n if i.hand_value(j) > p.hand_value():\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n elif i.hand_value(j) < p.hand_value():\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n else:\n print(\n f'{i.name} tied with the {p.name}! 
No change. '\n , end='')\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n sys.stdout.flush()\n time.sleep(0.5)\n elif not p.check_blackjack() and not p.check_broke():\n do_decision(p, dealer)\n\n\ndef stand(player, dealer, hand_index=0):\n print(f'{player.name} stands.\\n')\n return True\n\n\ndef hit(player, dealer, hand_index=0):\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if isinstance(player, people.Dealer):\n while not player.check_hard_17() and not done:\n time.sleep(0.5)\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n else:\n choice = ''\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \").lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n while choice == 'y' and not done:\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \"\n ).lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n if not done:\n print()\n return True\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\ndef double_down(player, dealer, hand_index=0):\n if player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again.\"\n )\n return False\n elif player.did_double_down:\n print('You can double down only once! Choose a different option.')\n return False\n player.bet *= 2\n player.did_double_down = True\n print(f'Bet increased to ${player.bet}!.')\n do_decision(player, dealer, hand_index)\n return True\n\n\ndef check_status(player, hand_index=0):\n done = False\n hand_string = '['\n for card in player.hand[hand_index][:-1]:\n hand_string += card.__str__() + ', '\n print(f'Current Hand: {hand_string}', end='')\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)\n time.sleep(0.5)\n if player.check_blackjack(hand_index):\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n elif player.check_bust(hand_index):\n disp_str_slow(' ==> BUST! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n else:\n print()\n return done\n\n\ndef play_again(players):\n print()\n all_broke = True\n for i in players:\n if not i.check_broke():\n all_broke = False\n if not all_broke:\n choice = input(\n \"Do you all want to play another round? 
Enter 'y' or 'n': \").lower(\n )\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n print()\n return choice\n else:\n print()\n return 'n'\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\ndef print_players(players):\n for player in players:\n print(player)\n\n\ndef main():\n display_instructions()\n num_players = get_num_players()\n players = create_players(num_players)\n dealer = people.Dealer(Deck(6))\n players.append(dealer)\n replay_choice = 'y'\n while replay_choice == 'y':\n reset(players)\n place_bets(players)\n for i in range(0, 2):\n deal(dealer, players)\n view_hands(players)\n cycle_decisions(players)\n replay_choice = play_again(players)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05)\n sys.stdout.flush()\n time.sleep(0.5)\n display_accounts(players)\n sys.stdout.flush()\n time.sleep(0.2)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n print('Goodbye!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef display_instructions():\n print(\n \"\"\"\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 \"\"\"\n )\n print(\n 'as possible without going over. The numbered cards have the value of their number, face cards have '\n )\n print(\n \"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\\n\"\n )\n print(\n 'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '\n )\n print(\n \"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the \"\n )\n print(\n \"\"\"dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n\"\"\"\n )\n print(\n \" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many \"\n )\n print(\n ' times as wanted, up until the player busts (goes over 21). \\n'\n )\n print(\n \" Stand: To 'stand' means to stay with the current cards. \\n\"\n )\n print(\n \" Split: A player can 'split' only when the first two cards of his or her hand are the \"\n )\n print(\n ' same. When this occurs, the player makes two separate piles, one with each '\n )\n print(\n ' identical card, and places a bet identical to the initial bet for the second '\n )\n print(\n \"\"\" pile. Then, the player can hit or stand with each pile as in a normal round.\n\"\"\"\n )\n print(\n \" Double Down: When a player chooses to 'double down', he or she can increase the current bet \"\n )\n print(\n \"\"\" by 100% in exchange for agreeing to stand after being dealt one more card.\n\"\"\"\n )\n input('Ready to play? Hit any key to continue: ')\n print()\n\n\ndef get_num_players():\n num = input('How many people will be playing (up to 7)? Enter a number: ')\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\n num = input('Please enter a number from 1 to 7: ')\n print(\n \"\"\"\nGreat! 
Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n\"\"\"\n )\n time.sleep(1)\n return int(num)\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\ndef deal(dealer, players):\n for player in players[:-1]:\n if not player.check_broke():\n dealer.deal_card(player)\n dealer.deal_card(players[-1])\n\n\ndef place_bets(players):\n print('Now, each of you must place your bets.\\n')\n bets = []\n for player in players[:-1]:\n if not player.check_broke():\n bet = input(f'Bet for {player.name}: ')\n while not bet.isdigit() or int(bet) > player.money:\n msg = 'Please enter a whole number: '\n if bet.isdigit():\n msg = (\n \"You don't have enough money! Enter a different value: \"\n )\n bet = input(msg)\n player.bet = int(bet)\n print()\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\ndef cycle_decisions(players):\n dealer = players[-1]\n for p in players:\n if isinstance(p, people.Dealer):\n print(\n f\"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11).\"\n )\n sys.stdout.flush()\n time.sleep(0.8)\n if not check_status(p) and not p.check_hard_17():\n hit(p, dealer)\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05)\n if p.check_bust():\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n else:\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n if i.hand_value(j) > p.hand_value():\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n elif i.hand_value(j) < p.hand_value():\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n else:\n print(\n f'{i.name} tied with the {p.name}! 
No change. '\n , end='')\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n sys.stdout.flush()\n time.sleep(0.5)\n elif not p.check_blackjack() and not p.check_broke():\n do_decision(p, dealer)\n\n\ndef stand(player, dealer, hand_index=0):\n print(f'{player.name} stands.\\n')\n return True\n\n\ndef hit(player, dealer, hand_index=0):\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if isinstance(player, people.Dealer):\n while not player.check_hard_17() and not done:\n time.sleep(0.5)\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n else:\n choice = ''\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \").lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n while choice == 'y' and not done:\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \"\n ).lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n if not done:\n print()\n return True\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\ndef double_down(player, dealer, hand_index=0):\n if player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again.\"\n )\n return False\n elif player.did_double_down:\n print('You can double down only once! Choose a different option.')\n return False\n player.bet *= 2\n player.did_double_down = True\n print(f'Bet increased to ${player.bet}!.')\n do_decision(player, dealer, hand_index)\n return True\n\n\ndef check_status(player, hand_index=0):\n done = False\n hand_string = '['\n for card in player.hand[hand_index][:-1]:\n hand_string += card.__str__() + ', '\n print(f'Current Hand: {hand_string}', end='')\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)\n time.sleep(0.5)\n if player.check_blackjack(hand_index):\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n elif player.check_bust(hand_index):\n disp_str_slow(' ==> BUST! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n else:\n print()\n return done\n\n\ndef play_again(players):\n print()\n all_broke = True\n for i in players:\n if not i.check_broke():\n all_broke = False\n if not all_broke:\n choice = input(\n \"Do you all want to play another round? 
Enter 'y' or 'n': \").lower(\n )\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n print()\n return choice\n else:\n print()\n return 'n'\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\ndef print_players(players):\n for player in players:\n print(player)\n\n\ndef main():\n display_instructions()\n num_players = get_num_players()\n players = create_players(num_players)\n dealer = people.Dealer(Deck(6))\n players.append(dealer)\n replay_choice = 'y'\n while replay_choice == 'y':\n reset(players)\n place_bets(players)\n for i in range(0, 2):\n deal(dealer, players)\n view_hands(players)\n cycle_decisions(players)\n replay_choice = play_again(players)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05)\n sys.stdout.flush()\n time.sleep(0.5)\n display_accounts(players)\n sys.stdout.flush()\n time.sleep(0.2)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n print('Goodbye!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from card import Card\nfrom deck import Deck\nimport people\nimport chip\nimport sys\nimport time\n\n\ndef display_instructions():\n print(\n \"\"\"\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 \"\"\"\n )\n print(\n 'as possible without going over. The numbered cards have the value of their number, face cards have '\n )\n print(\n \"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\\n\"\n )\n print(\n 'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '\n )\n print(\n \"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the \"\n )\n print(\n \"\"\"dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n\"\"\"\n )\n print(\n \" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many \"\n )\n print(\n ' times as wanted, up until the player busts (goes over 21). \\n'\n )\n print(\n \" Stand: To 'stand' means to stay with the current cards. \\n\"\n )\n print(\n \" Split: A player can 'split' only when the first two cards of his or her hand are the \"\n )\n print(\n ' same. When this occurs, the player makes two separate piles, one with each '\n )\n print(\n ' identical card, and places a bet identical to the initial bet for the second '\n )\n print(\n \"\"\" pile. Then, the player can hit or stand with each pile as in a normal round.\n\"\"\"\n )\n print(\n \" Double Down: When a player chooses to 'double down', he or she can increase the current bet \"\n )\n print(\n \"\"\" by 100% in exchange for agreeing to stand after being dealt one more card.\n\"\"\"\n )\n input('Ready to play? Hit any key to continue: ')\n print()\n\n\ndef get_num_players():\n num = input('How many people will be playing (up to 7)? 
Enter a number: ')\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\n num = input('Please enter a number from 1 to 7: ')\n print(\n \"\"\"\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n\"\"\"\n )\n time.sleep(1)\n return int(num)\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\ndef deal(dealer, players):\n for player in players[:-1]:\n if not player.check_broke():\n dealer.deal_card(player)\n dealer.deal_card(players[-1])\n\n\ndef place_bets(players):\n print('Now, each of you must place your bets.\\n')\n bets = []\n for player in players[:-1]:\n if not player.check_broke():\n bet = input(f'Bet for {player.name}: ')\n while not bet.isdigit() or int(bet) > player.money:\n msg = 'Please enter a whole number: '\n if bet.isdigit():\n msg = (\n \"You don't have enough money! Enter a different value: \"\n )\n bet = input(msg)\n player.bet = int(bet)\n print()\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\ndef cycle_decisions(players):\n dealer = players[-1]\n for p in players:\n if isinstance(p, people.Dealer):\n print(\n f\"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11).\"\n )\n sys.stdout.flush()\n time.sleep(0.8)\n if not check_status(p) and not p.check_hard_17():\n hit(p, dealer)\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05)\n if p.check_bust():\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n else:\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n if i.hand_value(j) > p.hand_value():\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n elif i.hand_value(j) < p.hand_value():\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n else:\n print(\n f'{i.name} tied with the {p.name}! 
No change. '\n , end='')\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n sys.stdout.flush()\n time.sleep(0.5)\n elif not p.check_blackjack() and not p.check_broke():\n do_decision(p, dealer)\n\n\ndef stand(player, dealer, hand_index=0):\n print(f'{player.name} stands.\\n')\n return True\n\n\ndef hit(player, dealer, hand_index=0):\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if isinstance(player, people.Dealer):\n while not player.check_hard_17() and not done:\n time.sleep(0.5)\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n else:\n choice = ''\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \").lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n while choice == 'y' and not done:\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \"\n ).lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n if not done:\n print()\n return True\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\ndef double_down(player, dealer, hand_index=0):\n if player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again.\"\n )\n return False\n elif player.did_double_down:\n print('You can double down only once! Choose a different option.')\n return False\n player.bet *= 2\n player.did_double_down = True\n print(f'Bet increased to ${player.bet}!.')\n do_decision(player, dealer, hand_index)\n return True\n\n\ndef check_status(player, hand_index=0):\n done = False\n hand_string = '['\n for card in player.hand[hand_index][:-1]:\n hand_string += card.__str__() + ', '\n print(f'Current Hand: {hand_string}', end='')\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)\n time.sleep(0.5)\n if player.check_blackjack(hand_index):\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n elif player.check_bust(hand_index):\n disp_str_slow(' ==> BUST! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n else:\n print()\n return done\n\n\ndef play_again(players):\n print()\n all_broke = True\n for i in players:\n if not i.check_broke():\n all_broke = False\n if not all_broke:\n choice = input(\n \"Do you all want to play another round? 
Enter 'y' or 'n': \").lower(\n )\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n print()\n return choice\n else:\n print()\n return 'n'\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\ndef print_players(players):\n for player in players:\n print(player)\n\n\ndef main():\n display_instructions()\n num_players = get_num_players()\n players = create_players(num_players)\n dealer = people.Dealer(Deck(6))\n players.append(dealer)\n replay_choice = 'y'\n while replay_choice == 'y':\n reset(players)\n place_bets(players)\n for i in range(0, 2):\n deal(dealer, players)\n view_hands(players)\n cycle_decisions(players)\n replay_choice = play_again(players)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05)\n sys.stdout.flush()\n time.sleep(0.5)\n display_accounts(players)\n sys.stdout.flush()\n time.sleep(0.2)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n print('Goodbye!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from card import Card;\r\nfrom deck import Deck;\r\nimport people;\r\nimport chip;\r\nimport sys;\r\nimport time;\r\n\r\ndef display_instructions() :\r\n print('\\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 ');\r\n print('as possible without going over. The numbered cards have the value of their number, face cards have ');\r\n print('a value of 10 each, and the ace can either be counted as 1 or 11 (player\\'s choice)\\n');\r\n print('Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to ');\r\n print('each player (up to 7 players) and to the dealer. The player\\'s cards will be face up while one of the ');\r\n print('dealer\\'s cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \\n');\r\n print(' Hit: when a player \\'hits,\\' he or she is dealt another card. A player can hit as many ');\r\n print(' times as wanted, up until the player busts (goes over 21). \\n');\r\n print(' Stand: To \\'stand\\' means to stay with the current cards. \\n');\r\n print(' Split: A player can \\'split\\' only when the first two cards of his or her hand are the ');\r\n print(' same. When this occurs, the player makes two separate piles, one with each ');\r\n print(' identical card, and places a bet identical to the initial bet for the second ');\r\n print(' pile. Then, the player can hit or stand with each pile as in a normal round.\\n');\r\n print(' Double Down: When a player chooses to \\'double down\\', he or she can increase the current bet ');\r\n print(' by 100% in exchange for agreeing to stand after being dealt one more card.\\n');\r\n input('Ready to play? Hit any key to continue: ');\r\n print();\r\n \r\ndef get_num_players() :\r\n num = input('How many people will be playing (up to 7)? 
Enter a number: ');\r\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\r\n num = input('Please enter a number from 1 to 7: ');\r\n print('\\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\\n');\r\n time.sleep(1);\r\n return int(num);\r\n \r\ndef create_players(num) :\r\n players_list = [];\r\n for i in range(num) :\r\n name = input(f'Player {i+1}, what is your name? ');\r\n while name == '':\r\n name = input('Please enter your name: ');\r\n players_list.append(people.Player(name, 1000));\r\n print('\\nAll players will begin the game with the same amount of $1,000 dollars.\\n');\r\n return players_list;\r\n \r\ndef deal(dealer, players) :\r\n for player in players[:-1] : \r\n if not player.check_broke() : dealer.deal_card(player);\r\n dealer.deal_card(players[-1]); # dealer deals card to dealer, too\r\n \r\ndef place_bets(players) :\r\n print('Now, each of you must place your bets.\\n');\r\n bets = [];\r\n for player in players[:-1] : # doesn't reach dealer\r\n if not player.check_broke() :\r\n bet = input(f'Bet for {player.name}: ');\r\n while not bet.isdigit() or int(bet) > player.money :\r\n msg = 'Please enter a whole number: ';\r\n if bet.isdigit() :\r\n msg = 'You don\\'t have enough money! Enter a different value: ';\r\n bet = input(msg);\r\n player.bet = int(bet);\r\n print(); \r\n \r\ndef view_hands(players) :\r\n print('Here are the hands for each player: \\n');\r\n for p in players :\r\n if isinstance(p, people.Dealer) :\r\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='');\r\n print();\r\n else :\r\n if not p.check_broke() :\r\n print(f'{p.name}: {p.hand}', end='');\r\n if p.check_blackjack() :\r\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!');\r\n else : print();\r\n print();\r\n \r\ndef do_decision(player, dealer, hand_index=0) :\r\n choices_dict = {'s':stand, 'h':hit, 'p':split, 'd':double_down};\r\n valid_choice = False;\r\n while not valid_choice :\r\n choice = input(f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): ');\r\n while choice.lower() not in choices_dict.keys() :\r\n choice = input('Please enter either \\'s\\', \\'h\\', \\'p\\', or \\'d\\', corresponding to your choice: ');\r\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index);\r\n \r\ndef cycle_decisions(players) :\r\n dealer = players[-1];\r\n for p in players :\r\n if isinstance(p, people.Dealer) :\r\n print(f'{p.name} will hit until reaching a hand of at least \\'hard\\' 17 (without an ace counting for 11).');\r\n sys.stdout.flush();\r\n time.sleep(0.8);\r\n if not check_status(p) and not p.check_hard_17() : hit(p, dealer);\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05);\r\n if p.check_bust() :\r\n for i in players[:-1] :\r\n if not i.check_broke() :\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n print(' ', end='');\r\n for j in range(0,len(i.hand)) : # this is to loop through each hand for a player (player would have multiple hands after splitting)\r\n if not i.check_bust(j) :\r\n print(f'{i.name} wins ${i.bet}! ', end='');\r\n i.money += i.bet;\r\n else :\r\n print(f'{i.name} loses ${i.bet}! 
', end='');\r\n i.money -= i.bet;\r\n i.chips = chip.convert_to_chips(i.money);\r\n if i.check_broke() :\r\n print(f'Sorry {i.name}, but you\\'re out of money and can no longer play in this game');\r\n else :\r\n print(f'Current Balance: ${i.money} (Chips: {i.chips})');\r\n else :\r\n for i in players[:-1] :\r\n if not i.check_broke() :\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n print(' ', end='');\r\n for j in range(0,len(i.hand)) :\r\n if not i.check_bust(j) :\r\n if i.hand_value(j) > p.hand_value() :\r\n print(f'{i.name} wins ${i.bet}! ', end='');\r\n i.money += i.bet;\r\n elif i.hand_value(j) < p.hand_value() :\r\n print(f'{i.name} loses ${i.bet}! ', end='');\r\n i.money -= i.bet;\r\n else :\r\n print(f'{i.name} tied with the {p.name}! No change. ', end='');\r\n else :\r\n print(f'{i.name} loses ${i.bet}! ', end='');\r\n i.money -= i.bet;\r\n i.chips = chip.convert_to_chips(i.money);\r\n if i.check_broke() :\r\n print(f'Sorry {i.name}, but you\\'re out of money and can no longer play in this game');\r\n else :\r\n print(f'Current Balance: ${i.money} (Chips: {i.chips})');\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n else :\r\n if not p.check_blackjack() and not p.check_broke() :\r\n do_decision(p, dealer);\r\n \r\ndef stand(player, dealer, hand_index=0) :\r\n print(f'{player.name} stands.\\n');\r\n return True;\r\n \r\ndef hit(player, dealer, hand_index=0) :\r\n dealer.deal_card(player, hand_index);\r\n done = check_status(player, hand_index);\r\n if isinstance(player, people.Dealer) :\r\n while not player.check_hard_17() and not done:\r\n time.sleep(0.5);\r\n dealer.deal_card(player, hand_index);\r\n done = check_status(player, hand_index);\r\n else :\r\n \r\n choice = '';\r\n if not done :\r\n choice = input('Do you want to hit again (\\'y\\' or \\'n\\')? 
').lower();\r\n while choice != 'y' and choice != 'n' :\r\n choice = input('Enter either \\'y\\' or \\'n\\': ');\r\n while choice == 'y' and not done:\r\n dealer.deal_card(player, hand_index);\r\n done = check_status(player, hand_index);\r\n if not done :\r\n choice = input('Do you want to hit again (\\'y\\' or \\'n\\')? ').lower();\r\n while choice != 'y' and choice != 'n' :\r\n choice = input('Enter either \\'y\\' or \\'n\\': ');\r\n if not done : print();\r\n return True;\r\n \r\ndef split(player, dealer, hand_index=0) :\r\n if player.hand[hand_index][0] != player.hand[hand_index][1] :\r\n print('You can\\'t split on that hand! You need two identical cards to split. Choose again.');\r\n return False;\r\n elif player.bet*2 > player.money :\r\n print(f'You don\\'t have enough money to split with your current bet (${player.bet} * 2 = ${player.bet*2})! Choose again.');\r\n return False;\r\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]];\r\n player.hand = hands;\r\n print('Now you will play each hand separately: \\n');\r\n for i in range(0,2) :\r\n print(f'For Hand #{i+1}: ');\r\n do_decision(player, dealer, i); \r\n return True;\r\n \r\n \r\ndef double_down(player, dealer, hand_index=0) :\r\n if player.bet*2 > player.money :\r\n print(f'You don\\'t have enough money to do that (${player.bet} * 2 = ${player.bet*2})! Choose again.');\r\n return False;\r\n elif player.did_double_down :\r\n print('You can double down only once! 
Choose a different option.');\r\n return False;\r\n player.bet *= 2;\r\n player.did_double_down = True;\r\n print(f'Bet increased to ${player.bet}!.');\r\n do_decision(player, dealer, hand_index);\r\n return True;\r\n \r\ndef check_status(player, hand_index=0) :\r\n done = False;\r\n hand_string = '[';\r\n for card in player.hand[hand_index][:-1] :\r\n hand_string += card.__str__() + ', ';\r\n print(f'Current Hand: {hand_string}', end='');\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05);\r\n time.sleep(0.5);\r\n if player.check_blackjack(hand_index) :\r\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05);\r\n if not isinstance(player, people.Dealer) : \r\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05);\r\n print('\\n\\n', end='');\r\n done = True;\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n elif player.check_bust(hand_index) :\r\n disp_str_slow(' ==> BUST! ', 0.05);\r\n if not isinstance(player, people.Dealer) : \r\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05);\r\n print('\\n\\n', end='');\r\n done = True;\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n else :\r\n print();\r\n return done;\r\n \r\ndef play_again(players) :\r\n print();\r\n all_broke = True;\r\n for i in players :\r\n if not i.check_broke() : all_broke = False;\r\n if not all_broke :\r\n choice = input('Do you all want to play another round? 
Enter \\'y\\' or \\'n\\': ').lower();\r\n while choice != 'y' and choice != 'n' :\r\n choice = input('Enter either \\'y\\' or \\'n\\': ');\r\n print();\r\n return choice;\r\n else :\r\n print();\r\n return 'n';\r\n \r\ndef reset(players) :\r\n dealer = players[-1];\r\n for player in players : \r\n dealer.retrieve_cards(player);\r\n player.bet = 0;\r\n \r\ndef display_accounts(players) :\r\n for player in players[:-1] :\r\n change = player.money - player.initial_money;\r\n word = 'gain';\r\n if change < 0 : \r\n word = 'loss';\r\n print(f' {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\\n');\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n \r\ndef disp_str_slow(phrase, t) :\r\n for i in phrase :\r\n print(i, end='');\r\n sys.stdout.flush();\r\n time.sleep(t);\r\n\r\ndef print_players(players) :\r\n for player in players :\r\n print(player);\r\n\r\ndef main() :\r\n display_instructions();\r\n num_players = get_num_players();\r\n players = create_players(num_players);\r\n dealer = people.Dealer(Deck(6));\r\n players.append(dealer);\r\n \r\n replay_choice = 'y';\r\n while replay_choice == 'y' :\r\n reset(players);\r\n place_bets(players);\r\n for i in range(0,2) :\r\n deal(dealer, players);\r\n view_hands(players); \r\n cycle_decisions(players);\r\n replay_choice = play_again(players); \r\n \r\n print('------------------------------------------------------------------------------------------------\\n');\r\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05);\r\n sys.stdout.flush();\r\n time.sleep(0.5)\r\n display_accounts(players);\r\n sys.stdout.flush(); \r\n time.sleep(0.2)\r\n print('------------------------------------------------------------------------------------------------\\n');\r\n print('Goodbye!');\r\n \r\nif __name__ == '__main__' :\r\n main();",
"step-ids": [
7,
19,
20,
21,
22
]
}
|
[
7,
19,
20,
21,
22
] |
<|reserved_special_token_0|>
def callback():
print('callback invoked')
def stopper(loop):
print('stopper invoked')
loop.stop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def callback():
print('callback invoked')
def stopper(loop):
print('stopper invoked')
loop.stop()
<|reserved_special_token_0|>
try:
print('registering callbacks')
event_loop.call_soon(callback)
event_loop.call_soon(stopper, event_loop)
print('entering event loop')
event_loop.run_forever()
finally:
print('closing event loop')
event_loop.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def callback():
print('callback invoked')
def stopper(loop):
print('stopper invoked')
loop.stop()
event_loop = asyncio.get_event_loop()
try:
print('registering callbacks')
event_loop.call_soon(callback)
event_loop.call_soon(stopper, event_loop)
print('entering event loop')
event_loop.run_forever()
finally:
print('closing event loop')
event_loop.close()
<|reserved_special_token_1|>
import asyncio
def callback():
print('callback invoked')
def stopper(loop):
print('stopper invoked')
loop.stop()
event_loop = asyncio.get_event_loop()
try:
print('registering callbacks')
event_loop.call_soon(callback)
event_loop.call_soon(stopper, event_loop)
print('entering event loop')
event_loop.run_forever()
finally:
print('closing event loop')
event_loop.close()
<|reserved_special_token_1|>
import asyncio
def callback():
print('callback invoked')
def stopper(loop):
print('stopper invoked')
loop.stop()
event_loop = asyncio.get_event_loop()
try:
print('registering callbacks')
# the callbacks are invoked in the order they are scheduled
event_loop.call_soon(callback)
event_loop.call_soon(stopper, event_loop)
print('entering event loop')
event_loop.run_forever()
finally:
print('closing event loop')
event_loop.close()
|
flexible
|
{
"blob_id": "3b96cc4ef538a06251958495e36fe5dbdf80c13d",
"index": 4952,
"step-1": "<mask token>\n\n\ndef callback():\n print('callback invoked')\n\n\ndef stopper(loop):\n print('stopper invoked')\n loop.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef callback():\n print('callback invoked')\n\n\ndef stopper(loop):\n print('stopper invoked')\n loop.stop()\n\n\n<mask token>\ntry:\n print('registering callbacks')\n event_loop.call_soon(callback)\n event_loop.call_soon(stopper, event_loop)\n print('entering event loop')\n event_loop.run_forever()\nfinally:\n print('closing event loop')\n event_loop.close()\n",
"step-3": "<mask token>\n\n\ndef callback():\n print('callback invoked')\n\n\ndef stopper(loop):\n print('stopper invoked')\n loop.stop()\n\n\nevent_loop = asyncio.get_event_loop()\ntry:\n print('registering callbacks')\n event_loop.call_soon(callback)\n event_loop.call_soon(stopper, event_loop)\n print('entering event loop')\n event_loop.run_forever()\nfinally:\n print('closing event loop')\n event_loop.close()\n",
"step-4": "import asyncio\n\n\ndef callback():\n print('callback invoked')\n\n\ndef stopper(loop):\n print('stopper invoked')\n loop.stop()\n\n\nevent_loop = asyncio.get_event_loop()\ntry:\n print('registering callbacks')\n event_loop.call_soon(callback)\n event_loop.call_soon(stopper, event_loop)\n print('entering event loop')\n event_loop.run_forever()\nfinally:\n print('closing event loop')\n event_loop.close()\n",
"step-5": "import asyncio\n\ndef callback():\n print('callback invoked')\n\ndef stopper(loop):\n print('stopper invoked')\n loop.stop()\n\nevent_loop = asyncio.get_event_loop()\ntry:\n print('registering callbacks')\n # the callbacks are invoked in the order they are scheduled\n event_loop.call_soon(callback)\n event_loop.call_soon(stopper, event_loop)\n print('entering event loop')\n event_loop.run_forever()\nfinally:\n print('closing event loop')\n event_loop.close()\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# #!/usr/bin/python
# last edit abigailc@Actaeon on jan 27 2017
#pulling the taxonomy functions out of makespeciestree because I need to make them faster...
#insects is running for literally >20 hours.
# Default locations of the NCBI taxonomy dump files (from a taxdump release).
names_file = "/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/names.dmp"  # taxid <-> name table
nodes_file = "/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/nodes.dmp"  # taxid parent/rank table
######### PERSONAL_SETTINGS #########
# Machine-specific paths/commands; edit these when running on another host.
ssh_inst = "ssh -l abigailc -i ~/.ssh/id_rsa eofe4.mit.edu"  # ssh command for the compute cluster
clus_head = "abigailc@eofe4.mit.edu:/home/abigailc/"  # scp destination prefix on the cluster
Path_Blast = "/Users/abigailc/blast/"  # local BLAST installation directory
import os
import re
import time
import sys
#from oxy_mods.Classes_DTL_Detector import Fasta
#BASIC OPERATIONS
def Str_To_Taxid(string, names_file):
    """Translate a taxon name into its NCBI taxon id.

    Scans ``names_file`` (NCBI names.dmp: taxid, name, unique name,
    name class — separated by tab-pipe-tab) line by line and extracts
    the taxid column of the first line containing ``string`` as a
    tab-delimited field.  Underscores in ``string`` are converted to
    spaces first, so "Homo_sapiens" and "Homo sapiens" are equivalent.

    Returns the taxid as a string, or "NA" (after printing an error)
    when the name is not found.
    """
    string = string.replace("_", " ")
    # re.escape keeps names containing regex metacharacters
    # (brackets, dots, parentheses...) from corrupting the pattern.
    pattern = re.compile(r"(\d*)(\t\|\t)(" + re.escape(string) + r")(\t)(.*)")
    with open(names_file) as names:
        for line in names:
            # Cheap substring pre-filter before the regex extraction.
            if "\t" + string + "\t" in line:
                # .strip() drops the trailing newline the substitution
                # leaves behind (the old code returned e.g. "9606\n",
                # which broke downstream "\t"+taxid+"\t" lookups).
                return pattern.sub(r"\1", line).strip()
    print("Error finding string: " + string + " in file: " + names_file)
    return "NA"
def Taxid_To_Children(taxid, nodes_file):
    """Return the direct children of ``taxid`` from an NCBI nodes.dmp file.

    Each nodes.dmp line holds: child taxid, parent taxid, rank, ...
    (tab-pipe-tab separated).  Every line whose *parent* column equals
    ``taxid`` contributes one ``(child_taxid, child_rank)`` tuple.

    Returns a (possibly empty) list of (taxid, rank) string tuples.
    """
    taxid = taxid.strip()
    marker = "\t" + taxid + "\t"
    # \1~\5 rewrites a matching line to "child~rank"; compiled once
    # instead of being rebuilt for every line of the (large) dump.
    pattern = re.compile(r"(\d*)(\t\|\t)(" + re.escape(taxid) +
                         r")(\t\|\t)([a-z]*)(.*)")
    child_rank_list = []
    with open(nodes_file) as nodes:
        for line in nodes:
            # Cheap substring pre-filter; the tab guards keep taxid 12
            # from matching 123, 1234, ...
            if marker not in line:
                continue
            rewritten = pattern.sub(r"\1~\5", line)
            # If the substitution did not fire (taxid matched some other
            # column), tabs survive in the string: skip the false hit.
            if "\t" in rewritten:
                continue
            child_id, child_rank = rewritten.split("~")
            child_rank_list.append((child_id.strip(), child_rank.strip()))
    return child_rank_list
def Get_Taxid_Rank(taxid, nodes_file):
    """Return the taxonomic rank (e.g. "genus", "species") of ``taxid``.

    Matches ``taxid`` against the first column of an NCBI nodes.dmp file
    (child taxid, parent taxid, rank, ... — tab-pipe-tab separated) and
    extracts the rank column.

    Returns the rank as a lower-case string, or "NA" when the taxid does
    not appear in the file.
    """
    taxid = taxid.strip()
    prefix = taxid + "\t"
    pattern = re.compile(r"(" + re.escape(taxid) +
                         r")(\t\|\t)(\d*)(\t\|\t)([a-z]*)(.*)")
    with open(nodes_file) as nodes:
        for line in nodes:
            # First column must be exactly this taxid (tab-terminated),
            # so taxid 12 cannot match 123, 1234, ...
            if not line.startswith(prefix):
                continue
            rank = pattern.sub(r"\5", line).strip()
            # A surviving tab means the substitution did not fire on this
            # line; keep scanning.
            if "\t" not in rank:
                return rank
    return "NA"
def One_Rank_Lower(rank):
    """Return the rank one step below ``rank`` in the Linnean hierarchy.

    E.g. "phylum" -> "class", "genus" -> "species".  Returns "NA" for
    "species" (nothing below it), for the sentinel "NA", and for any
    rank not in the standard eight-level hierarchy.
    """
    print("looking one level lower than" + rank)
    if rank == "species":
        print("is species!")
        return "NA"
    hierarchy = ["superkingdom", "kingdom", "phylum", "class",
                 "order", "family", "genus", "species"]
    if rank not in hierarchy:
        # "NA" propagates silently; anything else gets flagged.
        if rank != "NA":
            print(rank + " is weird")
        return "NA"
    return hierarchy[hierarchy.index(rank) + 1]
def Return_Parent(taxid, nodes_file):
    """Return the parent taxid (string) of ``taxid`` from NCBI nodes.dmp.

    Matches ``taxid`` against the first column of the dump (child taxid,
    parent taxid, rank, ... — tab-pipe-tab separated) and extracts the
    parent column.

    Returns "NA" (after printing an error) when the taxid is not found.
    """
    taxid = taxid.strip()
    prefix = taxid + "\t"
    pattern = re.compile(r"(" + re.escape(taxid) +
                         r")(\t\|\t)(\d*)(\t\|\t)([a-z]*)(.*)")
    with open(nodes_file) as nodes:
        for line in nodes:
            # First column must be exactly this taxid (tab-terminated).
            if not line.startswith(prefix):
                continue
            parent_taxid = pattern.sub(r"\3", line)
            if "\t" not in parent_taxid:
                # .strip() drops the trailing newline the substitution
                # leaves behind (the old code returned e.g. "9605\n").
                return parent_taxid.strip()
    print("error finding parent taxa")
    return "NA"
#COMPLEX OPERATIONS
def Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file, names_file, acc_list):
    """Find a species-level descendant of ``taxid`` usable for BLAST.

    Walks the NCBI taxonomy below ``taxid`` one direct-child subtree at
    a time (breadth-first within each subtree) — hence "less
    efficiently" than the plain depth-first Ret_A_Valid_Species_Below.
    Every species-rank node has its name looked up; a name is accepted
    when it starts with an upper-case letter (lower-case first letters
    mark informal/unclassified entries) and passes
    Check_Spec_Name_Acceptable_List() against ``acc_list``.

    Returns the first acceptable species name, or "NA" when the whole
    subtree contains none.

    Note: replaces the old parallel-list bookkeeping whose stray
    ``done`` flag could abort the search before all top-level branches
    were examined.
    """
    for top_child in Taxid_To_Children(taxid, nodes_file):
        # Exhaust this direct child's subtree before moving to the next.
        frontier = [top_child]
        while frontier:
            next_frontier = []
            for child_id, child_rank in frontier:
                if child_rank == "species":
                    name = Taxid_To_Name(child_id, names_file)
                    if name[0].islower() is False:
                        if Check_Spec_Name_Acceptable_List(name, acc_list) is True:
                            return name
                else:
                    # Non-species node: descend another level.
                    next_frontier.extend(Taxid_To_Children(child_id, nodes_file))
            frontier = next_frontier
    return "NA"
# NOTE: near-duplicate of Ret_A_Valid_Species_Below just below (different
# traversal order); candidates for consolidation during a refactor.
def Ret_A_Valid_Species_Below(taxid, nodes_file, names_file, acc_list):
    """Depth-first search below ``taxid`` for a BLAST-usable species name.

    Maintains an explicit stack (``masterlist``) of sibling lists of
    ``(taxid, rank)`` tuples.  Species-level nodes are name-checked: a
    name is accepted when it starts with an upper-case letter and passes
    Check_Spec_Name_Acceptable_List() against ``acc_list``.  Non-species
    nodes are expanded one level via Taxid_To_Children().

    Returns the first acceptable species name found, or "NA" when the
    whole subtree has been exhausted.
    """
    masterlist = []
    # Stack of sibling lists: each entry is [(child_taxid, child_rank), ...]
    complete = False
    # Seed with the query taxid itself; the fake rank "starter" guarantees
    # the first iteration expands it instead of treating it as a species.
    masterlist.append([(taxid, "starter")])
    # NOTE: ``complete`` is never set True; the loop exits only via return.
    while complete is False:
        if masterlist == []:
            return("NA")
        # Work on the most recently pushed sibling list (top of stack).
        now_list = masterlist[-1]
        if now_list == []:
            # Drop every exhausted (empty) sibling list from the stack.
            while [] in masterlist:
                masterlist.remove([])
            if masterlist == []:
                return("NA")
            now_list = masterlist[-1]
        # Take the first remaining sibling from that list.
        now_tup = now_list[0]
        now_taxid, now_rank = now_tup[0], now_tup[1]
        if now_rank == "species":
            # Consume the tuple; ``now_list`` aliases masterlist[-1], so
            # this mutates the stack in place.
            now_list.remove(now_tup)
            now_name = Taxid_To_Name(now_taxid, names_file)
            # Lower-case first letter marks informal names; skip those.
            if now_name[0].islower() is False:
                in_blast = Check_Spec_Name_Acceptable_List(now_name,acc_list)
                if in_blast is True:
                    return now_name
        else:
            now_list.remove(now_tup)
            # Push this node's children as a new sibling list (depth-first).
            newlist = Taxid_To_Children(now_taxid, nodes_file)
            if newlist == "NA":
                # Defensive: Taxid_To_Children returns a list, but guard
                # against an "NA" sentinel anyway.
                pass
            else:
                masterlist.append(newlist)
    return("Uh, what?")  # unreachable: the loop only exits via return
def Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):
    """Breadth-first collection of every species-level taxid underneath
    *taxid*, via repeated Taxid_To_Children lookups against nodes_file.

    Returns a list of taxid strings (every descendant whose rank is
    "species").
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ]
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    #this is a list of children TAXIDS ONLY
    for item in child_list_atup:
        child_list_a.append(item[0])
    #print("initial pass")
    #print(child_list_atup)
    #print(child_list_a)
    done = False
    while done is False:
        # Harvest species-level entries from the current frontier.
        for item in child_list_atup:
            if item[1] == "species":
                #add the taxid to the list of species_level_children
                children.append(item[0])
                list_ch_remove.append(item)
        #remove taxids that were saved at the species level
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        #if all tips have terminated at the species level: you are done.
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        #for remaining non-species level taxids in lista:
        # -get their children (listb)
        # -add their children to a persistant list(listc)
        # -then set lista(the list to check and remove species-level-entries) to be == listc.
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            # NOTE(review): child_list_b is filled here but never read again;
            # only child_list_c (the tuples) feeds the next iteration.
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        #print("New parent list:")
        #print(child_list_atup)
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
        #print(child_list_a)
    #children is a list of all species-level TAXIDS that belong to the given group.
    return children
def Ret_All_Groups_One_Rank_Below(taxid, nodes_file):
    """Return the taxids of every descendant group exactly one *named rank*
    below *taxid* (e.g. class -> all orders), or "NA" when no lower rank
    exists.

    Descendants at ranks deeper than the target are pruned so the search
    stops at the target rank.
    """
    taxid = taxid.strip()
    print("looking for taxid:"+str(taxid))
    rank = Get_Taxid_Rank(taxid, nodes_file)
    print(rank)
    #raise SystemExit
    # One_Rank_Lower is defined elsewhere in this file; maps e.g. "class"->"order".
    target_rank = One_Rank_Lower(rank)
    if target_rank == "NA":
        return("NA")
    # Everything *after* the target rank in this ordering is too deep and is
    # discarded rather than expanded.
    removal_ranks = "superkingdom kingdom phylum class order family genus species"
    garbage, remove_string = removal_ranks.split(target_rank)
    remove_rank_list = remove_string.split()
    children = []
    list_ch_remove = []
    #print(remove_rank_list)
    #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ]
    child_list_a = Taxid_To_Children(taxid, nodes_file)
    done = False
    while done is False:
        for item in child_list_a:
            if item[1] == target_rank:
                #add the taxid to the list of species_level_children
                children.append(item[0])
                list_ch_remove.append(item)
            if item[1] in remove_rank_list:
                # Rank deeper than the target: prune without recording.
                list_ch_remove.append(item)
        #remove taxids that were saved at the species level
        for rem in list_ch_remove:
            child_list_a.remove(rem)
        #if all tips have terminated at the target species level: you are done.
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        #for remaining non-species level taxids in lista:
        # -get their children (listb)
        # -add their children to a persistant list(listc)
        # -then set lista(the list to check and remove species-level-entries) to be == listc.
        for parent in child_list_a:
            child_list_b = Taxid_To_Children(parent[0], nodes_file)
            if child_list_b == []:
                pass
            else:
                for bitem in child_list_b:
                    child_list_c.append(bitem)
        child_list_a = child_list_c
        #print(child_list_a)
    #children is a list of all ONE-RANK-BELOW level TAXIDS that belong to the given group.
    return children
#runs until all children are found of one rank below. eg (CLASS -> [order1, order 2, order3, order 4)
#for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. hopefully checking that we have data for the chosen species.
def Ret_Sister_Same_Rank(string, nodes_file, names_file):
    """Return the taxids of all sister clades (same tree level) of the named
    taxon: climb to the parent, list its children, and drop the taxon itself."""
    print(string)
    interest_taxid = Str_To_Taxid(string, names_file)
    print(interest_taxid)
    up_taxid = Return_Parent(interest_taxid, nodes_file).strip()
    interest_taxid = interest_taxid.strip()
    # Taxid_To_Children yields (taxid, rank) tuples; keep the taxids only.
    sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)
    sister_and_self = [member[0] for member in sis_self_tuples]
    print(sister_and_self)
    print(interest_taxid)
    # Remove the taxon of interest, leaving just its sisters.
    sister_and_self.remove(interest_taxid)
    sisterlist = sister_and_self
    print(sisterlist)
    return sisterlist
#sisterlist will be a list of taxids for the sister clades to the current thing. by level, not by rank.
#todo = implement something to redo if sisterlist is empty.
def Taxid_To_Name(taxid, names_file):
    """Reverse of Str_To_Taxid: map an NCBI taxid to its scientific name by
    scanning names_file (names.dmp format). Returns "NA" when not found."""
    query = taxid.strip()
    # Records in names.dmp begin with "<taxid>\t|...", so match on that prefix.
    prefix = query + "\t"
    prefix_len = len(prefix)
    with open(names_file) as handle:
        for record in handle:
            if record[:prefix_len] != prefix:
                continue
            candidate = re.sub("(\d*)(\t\|\t)([^\t]*)(\t\|\t)(.*)(\t\|\t)(scientific name)(.*)", "\\3", record)
            # A leftover tab means the substitution did not isolate a clean
            # name (this line is not the "scientific name" entry) - keep scanning.
            if "\t" not in candidate:
                return candidate.strip()
    print("Error finding name for: "+query+" in file: "+names_file)
    return "NA"
def Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):
    """Pick one outgroup (rooting) species for *string*'s clade.

    Collects the sister clades of *string* (climbing up a level whenever the
    sister list is empty) and returns the first species below any sister that
    passes Check_Spec_Name_Acceptable_List. If no sister at this level yields
    a usable species, keeps climbing toward the root. Returns "NA" when the
    root is reached without a hit.
    """
    print("one og sequence choser initiating")
    if "_" in string:
        string = string.replace("_", " ")
    sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)
    print("Sisterlist")
    print(sislist)
    my_taxid = Str_To_Taxid(string, names_file)
    # Climb until we reach a level with at least one sister clade.
    while sislist == []:
        parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
        parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
        sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
        my_taxid = parent_of_me_taxid
    test = "NA"
    for item in sislist:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
        if test != "NA":
            print(test)
            return test
    # Nothing usable among the first-level sisters: keep moving up the tree
    # until a valid species is found or the root is reached.
    while test == "NA":
        # BUG FIX: taxids are strings here; the original compared my_taxid to
        # the int 1, so the root check never fired.
        if str(my_taxid).strip() == "1":
            break
        sislist = []
        while sislist == []:
            parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
            parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
            sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
            my_taxid = parent_of_me_taxid
        for item in sislist:
            test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
            # BUG FIX: the original had this condition inverted - it returned
            # "NA" as soon as one sister lacked a valid species, aborting the
            # whole search. Return only on a *valid* species.
            if test != "NA":
                return test
    return test
def Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):
    """Return the members of tid_list that already have a representative.

    For each species in species_list, climbs the taxonomy until an ancestor
    of the target *rank* is found (or the climb dead-ends, recorded as "NA").
    Any tid_list entry matching a collected ancestor taxid is returned.

    NOTE(review): names_file and nodes_file are not parameters - this relies
    on module-level globals being set by the caller; verify before reuse.
    """
    print("Checking for reps... target rank is: "+rank)
    list_of_correct_rank = []
    found = []
    # Ranks *above* the target terminate the climb (we overshot).
    removal_ranks = "superkingdom kingdom phylum class order family genus species"
    remove_string, garbage = removal_ranks.split(rank)
    remove_rank_list = remove_string.split()
    for species in species_list:
        nid = Str_To_Taxid(species, names_file)
        go = True
        while go is True:
            rp = Return_Parent(nid, nodes_file)
            if rp == "NA":
                list_of_correct_rank.append(rp)
                go = False
                continue  # nothing further to inspect for this species
            # BUG FIX: taxids are strings; the original compared against the
            # int 1, which never matched - at the root (parent of "1" is "1")
            # the climb could loop forever.
            if rp.strip() == "1":
                list_of_correct_rank.append("NA")
                go = False
                continue
            par_rank = Get_Taxid_Rank(rp, nodes_file)
            if par_rank == rank:
                # Found an ancestor at exactly the target rank.
                list_of_correct_rank.append(rp.strip())
                go = False
            elif par_rank in remove_rank_list:
                # Overshot past the target rank - record a miss.
                list_of_correct_rank.append("NA")
                go = False
            else:
                # Keep climbing from the parent.
                nid = rp
    print(tid_list)
    print(list_of_correct_rank)
    for item in tid_list:
        if item in list_of_correct_rank:
            a = tid_list.index(item)
            found.append(tid_list[a])
    return found
#@blast_file should actually be a list of raw_blast_FASTA objects
def Choose_Loss_Candidates(string, species_list, names_file, acc_list, nodes_file):
    """Pick one representative species from each subgroup one rank below
    *string*'s taxon (e.g. one species per order under a class), skipping
    subgroups that already have a representative in species_list.

    Returns a list of species names; subgroups with no acceptable species
    are dropped (counted and reported, not returned).
    """
    print("loss search initiating")
    if "_" in string:
        print(string)
        string = string.replace("_", " ")
        print(string)
    taxid = Str_To_Taxid(string, names_file)
    #for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. hopefully checking that we have data for the chosen species.
    sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)
    if sub_taxids == "NA":
        print("Error getting loss candidates for string:"+string)
        return([])
    subgroup_names = []
    for item in sub_taxids:
        subgroup_names.append(Taxid_To_Name(item, names_file))
    b = Get_Taxid_Rank(taxid, nodes_file)
    a = One_Rank_Lower(b)
    found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)
    print("Representatives already exist for:")
    found_names = []
    for foundtid in found:
        # Drop already-represented subgroups from both parallel lists.
        # NOTE(review): .strip() here assumes sub_taxids entries are already
        # stripped; a whitespace mismatch would make .index() raise - confirm.
        foundtid = foundtid.strip()
        index1 = sub_taxids.index(foundtid)
        found_names.append(subgroup_names.pop(index1))
        del sub_taxids[index1]
    print(found_names)
    print("Looking for one representative from each of the following:")
    print(subgroup_names)
    loss_list = []
    ite = 0
    # #first check if it is in the output loss list.
    # for item in sub_taxids:
    #     with open(saved_loss_candidates) as saved:
    #         for line in saved:
    #             if item in line:
    #                 #newthing will be a species name.
    #                 newthing = re.sub("("item")(\t)(.*)", "\\3", line))
    #                 loss_list.append(newthing)
    #                 found2.append(item)
    #                 break
    #remove those found from file from the search list.
    # for item in found2:
    #     sub_taxids.pop(item)
    for item in sub_taxids:
        # One acceptable species (or "NA") per remaining subgroup.
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
        #print(test)
        print(subgroup_names[ite]+" : "+test)
        ite+=1
        loss_list.append(test)
        continue
    print("Loss candidates will be added:")
    na = 0
    for item in loss_list:
        if item == "NA":
            na +=1
    while "NA" in loss_list: loss_list.remove("NA")
    print(loss_list)
    print("there were "+str(na)+" "+a+"s that no suitable loss candidate was found for.")
    return loss_list
#either one per next-level-down
#or one per next-rank-down
def Check_Spec_Name_Acceptable_List(ssp_name, acc_list):
    """Return True when ssp_name is in acc_list, exactly or as a substring.

    An exact match passes silently; a substring-only match is accepted but
    logged, since it usually signals a strain-suffix mismatch.
    """
    if ssp_name in acc_list:
        return True
    # BUG FIX: the original referenced the generator expression's loop
    # variable (`item`) after the expression, which raises NameError in
    # Python 3 whenever a partial match was found. Capture the matching
    # entry instead.
    match = next((item for item in acc_list if ssp_name in item), None)
    if match is None:
        return False
    print("Err in match spec name - gen list: " + ssp_name + " " + match)
    return True
def Check_Spec_Name_Blast_File(ssp_name, blast_fasta_list):
    """Return True when ssp_name appears in more than half of the blast
    FASTA objects (checking each object's species_names, with a substring
    fallback)."""
    total = len(blast_fasta_list)
    majority = total / 2
    hits = 0
    tested = 0
    # Species names in the blast objects use underscores instead of spaces.
    needle = ssp_name.replace(" ", "_").strip()
    for blast in blast_fasta_list:
        tested += 1
        # Bail out early once the hit rate is hopeless (< 1/3 after 6 tries).
        if tested > 6 and hits < tested / 3:
            return False
        if needle in blast.species_names:
            hits += 1
        else:
            # Fall back to substring matching against each recorded name.
            for candidate in blast.species_names:
                if needle in candidate:
                    hits += 1
                    break
    return True if hits > majority else False
def gen_acceptable_species_list(list_raw_gene_fastas, acc_name):
    """Build the acceptable-species list for a gene: species present in more
    than half of the raw blast FASTA objects are written to acc_name via
    write_acc_list; returns the written filename."""
    names_list_acc = []
    numbers_list_acc = []
    # Tally how many raw FASTA files each species name appears in.
    for raw in list_raw_gene_fastas:
        raw.gen_species_lists()
        raw_sl = raw.species_names
        print(raw_sl[0])
        for rawsp in raw_sl:
            if rawsp in names_list_acc:
                ind = names_list_acc.index(rawsp)
                numbers_list_acc[ind] += 1
            else:
                names_list_acc.append(rawsp)
                numbers_list_acc.append(1)
    # A species must appear in more than half of the files to be kept.
    cutoff_num = len(list_raw_gene_fastas) / 2
    print(cutoff_num)
    rejects = [names_list_acc[pos] for pos, count in enumerate(numbers_list_acc)
               if count <= cutoff_num]
    print(len(rejects))
    # Sort so removal order is deterministic (names, not positions, are removed).
    rejects.sort(reverse=True)
    for reject in rejects:
        names_list_acc.remove(reject)
    return write_acc_list(names_list_acc, acc_name)
def write_acc_list(acc_list, acc_name):
    """Write the acceptable-species names to acc_name, one per line, and
    return the filename."""
    with open(acc_name, "w") as out_handle:
        out_handle.writelines(name + "\n" for name in acc_list)
    return acc_name
def write_spc_list(spc_list, spcname):
    """Write species names to spcname (one per line), trimming any strain
    suffix so underscored names keep only "Genus_species". Returns the
    filename."""
    with open(spcname, "w") as out_handle:
        for entry in spc_list:
            # Strip strain data: keep the first two underscore-separated parts.
            if "_" in entry:
                parts = entry.split("_")
                entry = parts[0] + "_" + parts[1]
            out_handle.write(entry + "\n")
    return spcname
#parser stuff
def Run_OG_LOSS_ON_CLUSTER(script_name,all_files, all_result_files):
    """Ship the taxonomy job to the cluster, submit it, and poll until every
    result file has been copied back locally.

    NOTE(review): relies on module-level globals ssh_inst (ssh command
    prefix) and clus_head (scp destination prefix) - confirm they are set
    before calling.
    """
    #here acc list is the name of the acc_list_current_file
    #auto gen an sbatch script
    os.system(ssh_inst+" \'mkdir Taxonomy\'")
    sb_script = script_name
    #scp it over
    print(all_files)
    for item in all_files:
        os.system("scp "+item+" "+clus_head+"Taxonomy")
    #run it
    #edit the script on the cluster to deal with my mistakes
    os.system(ssh_inst+" 'cd ~/Taxonomy; sbatch "+sb_script+"'")
    #scp it back and verify
    direct = os.getcwd()
    exists = False
    #now it should exist locally
    movehome = []
    # `finished` is a small state machine: "start" -> loop; False -> a result
    # is still missing; "yes" -> all results seen this pass; True -> done.
    finished = "start"
    #bring home the d
    for i in all_result_files:
        movehome.append(i)
    while finished is not True:
        # Attempt to copy every still-missing result file home.
        for filename in movehome:
            os.system("scp "+clus_head+"Taxonomy/"+filename+" "+direct)
        for item in all_result_files:
            #see if it got moved home.
            exists = os.path.isfile(item)
            if exists is True:
                if item in movehome:
                    movehome.remove(item)
                finished = "yes"
            else:
                finished = False
                print("Tax not done yet. could not locate : "+item+"checking again in 5 minutes")
                break
        if finished == "yes":
            print("Should be done!")
            finished = True
        else:
            #wait ten minutes and then try again.
            # NOTE(review): the message above says 5 minutes but the sleep is
            # 600 s (10 minutes); setting "yes" here just re-enters the loop.
            time.sleep(600)
            finished = "yes"
    #TEMPORARILY REMOVED result file deletion from the cluster to make testing progress faster.
    #for item in all_result_files:
    #    os.system(ssh_inst+" 'cd ~/Taxonomy; rm "+item+"'")
    #for item in all_files:
    #    os.system(ssh_inst+" 'cd ~/Taxonomy; rm "+item+"'")
    print("Taxonomy parsing complete")
#remove the script and the og loss file from cluster
def Get_OG_LOSS_DATA(list_of_clades, projectname):
    """Run the outgroup/loss-candidate taxonomy search for every clade.

    Builds (or reuses) an "acceptable species" list per gene-query file,
    writes per-clade species files, ships everything to the cluster via
    Run_OG_LOSS_ON_CLUSTER, then parses each clade's result file to attach
    loss_species_list, root_species and species_list_plus_og_loss to the
    clade objects. Returns the list of result filenames.
    """
    # Collect the distinct gene-query ("cat") files and, in parallel, the raw
    # blast-FASTA object lists that back each one.
    list_catfiles = []
    list_of_lists_of_raw_blast_files = []
    for item in list_of_clades:
        catfile = item.cat_file
        list_of_raw_blast_files = item.blast_raw
        if catfile in list_catfiles:
            pass
        else:
            list_catfiles.append(catfile)
            list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)
    cat_acc_dict = {}
    # For each cat file, generate the acceptable-species list file (once).
    for i in range(len(list_catfiles)):
        item = list_catfiles[i]
        list_raws = list_of_lists_of_raw_blast_files[i]
        gsflist = item.split(".")
        gsf_a = gsflist[0]
        gsf_b = gsf_a.split("/")[-1]
        acc_file = gsf_b+"_Acc_List.txt"
        acc_exists = os.path.isfile(acc_file)
        if acc_exists is True:
            pass
        else:
            print("....initializing all_acceptables from gene_seq_query file: "+gsf_b+". this should only happen once...")
            # Needs the full list of raw blast FASTA objects for this gene.
            acc_file = gen_acceptable_species_list(list_raws, acc_file)
        cat_acc_dict[item] = acc_file
    list_of_species_files = Gen_Species_File(list_of_clades, projectname)
    # Skip clades whose taxonomy results already exist locally (avoids
    # re-running the slow cluster step after a crash).
    list_to_tax_clades = []
    for item in list_of_clades:
        exists_result = os.path.isfile(item.result)
        if exists_result is False:
            list_to_tax_clades.append(item)
    # Write the cluster-array correlation file and generate the job script.
    corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades, cat_acc_dict, projectname)
    n = len(list_to_tax_clades)
    script_name = projectname+"_OGLScript.sh"
    scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)
    all_files = []
    for item in cat_acc_dict.values():
        all_files.append(item)
    for item in list_of_species_files:
        all_files.append(item)
    all_files.append(scriptfile)
    all_files.append(corr_file_name)
    # BUG FIX: the original used "is 0", which tests object identity, not
    # equality - unreliable for ints and a SyntaxWarning on modern Python.
    if len(results_list) == 0:
        pass
    else:
        Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)
    # Parse each clade's result file: line 0 = "~"-separated loss species,
    # line 1 = root species, line 2 = timing info.
    for item in list_of_clades:
        results_file = item.result
        loss_species = []
        # ROBUSTNESS: initialize so a result file with fewer than two lines
        # no longer raises NameError at the root_species check below.
        root_species = ""
        print(item.string_name)
        with open(results_file) as res:
            a = 0
            for line in res:
                if a == 0:
                    loss_species = line.strip()
                    loss_species = loss_species.split("~")
                    print("loss candidates")
                    if "" in loss_species:
                        loss_species.remove("")
                    if "\n" in loss_species:
                        loss_species.remove("\n")
                    item.loss_species_list = loss_species
                    print(loss_species)
                if a == 1:
                    root_species = line.strip()
                    item.root_species = root_species
                    print("root: "+root_species)
                if a == 2:
                    print("time:")
                    print(line)
                a += 1
        # Extend a copy of the original species list with loss + root species.
        item.species_list_plus_og_loss = []
        for thing in item.species_list_original:
            item.species_list_plus_og_loss.append(thing)
        if loss_species == []:
            pass
        else:
            for ls in loss_species:
                item.species_list_plus_og_loss.append(ls)
        if root_species == "":
            pass
        else:
            item.species_list_plus_og_loss.append(root_species)
    return results_list
# os.system("rm "+results_file)
#done
def Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):
    """Write the cluster-array index ("correlation") file: one numbered row
    per clade (species file, clade name, acc-list file, result file).
    Returns (corr filename, list of result filenames)."""
    corr_file_name = "Corr_"+projectname+".txt"
    results_list = []
    with open(corr_file_name, "w") as corr:
        for row_num, clade in enumerate(list_of_clades, start=1):
            fields = [str(row_num), clade.species_file, clade.string_name,
                      cat_acc_dict[clade.cat_file], clade.result]
            corr.write(" ".join(fields) + "\n")
            results_list.append(clade.result)
    return corr_file_name, results_list
def Generate_Script_File_OGLOSS(n, indexname, scriptname):
    """Emit the SLURM array-job script that fans Online_Taxon_Parse.py out
    over *n* tasks, each reading its row from *indexname*. Returns the
    script filename."""
    array_size = str(n)
    header = """#!/bin/bash
#SBATCH -p sched_mit_g4nier
#SBATCH -t 2-00:00:00
#SBATCH -J Tax
#SBATCH --array=1-"""
    middle = """
. /etc/profile.d/modules.sh
module add engaging/openmpi/1.8.8
MY_ARRAY_ID=$SLURM_ARRAY_TASK_ID
THE_INDEX="""
    tail = """
SPECIES_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $2}' )
STRING_NAME=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $3}' )
ACC_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $4}' )
RESULT=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $5}' )
echo $SPECIES_FILE
echo $STRING_NAME
echo $ACC_FILE
mpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT
exit"""
    script_body = header + array_size + middle + indexname + tail
    with open(scriptname, "w") as script:
        script.write(script_body)
    return scriptname
def Gen_Species_File(list_of_clades, projectname):
    """Write a per-clade species-list file and set per-clade bookkeeping.

    For each clade: writes item.species_list_original (with surrounding
    double quotes stripped) to "<prefix>_Species_List.txt" via
    write_spc_list, records that filename on item.species_file, and sets
    item.result to the name of the OG-loss result file the cluster job
    will later produce.

    Parameters:
        list_of_clades: clade objects with .prefix and
            .species_list_original attributes; mutated in place
            (.species_file and .result are assigned).
        projectname: unused here; kept for interface compatibility with
            the other Gen_*/Generate_* helpers that take it.

    Returns:
        List of the species-list filenames written, in input order.
    """
    list_sp_files = []
    for item in list_of_clades:
        species_file_name = item.prefix + "_Species_List.txt"
        # Strip surrounding double quotes that survive from upstream parsing.
        cleaned = [sp.strip("\"") for sp in item.species_list_original]
        # write_spc_list also truncates underscore-separated names to
        # "Genus_species" before writing.
        write_spc_list(cleaned, species_file_name)
        item.species_file = species_file_name
        list_sp_files.append(species_file_name)
        # Result filename the downstream OG-loss cluster job will create.
        item.result = item.prefix + "_OGL_Result.txt"
    return list_sp_files
|
normal
|
{
"blob_id": "5c1324207e24f2d723be33175101102bd97fe7a2",
"index": 4860,
"step-1": "<mask token>\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\n<mask token>\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef Str_To_Taxid(string, names_file):\n found = False\n string = string.replace('_', ' ')\n with open(names_file) as names:\n for line in names:\n if '\\t' + string + '\\t' in line:\n taxid_int = re.sub('(\\\\d*)(\\t\\\\|\\t)(' + string +\n ')(\\t)(.*)', '\\\\1', line)\n found = True\n break\n if found is False:\n print('Error finding string: ' + string + ' in file: ' + names_file)\n taxid_int = 'NA'\n return taxid_int\n\n\n<mask token>\n\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid + '\\t':\n apparent_rank = re.sub('(' + taxid +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\5', line)\n apparent_rank = apparent_rank.strip()\n if '\\t' in apparent_rank:\n pass\n else:\n return apparent_rank\n return 'NA'\n\n\n<mask token>\n\n\ndef Return_Parent(taxid, nodes_file):\n len_tax = len(taxid.strip())\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid.strip() + '\\t':\n parent_taxid = re.sub('(' + taxid.strip() +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\3', line)\n if '\\t' in parent_taxid:\n pass\n else:\n return parent_taxid\n print('error finding parent taxa')\n return 'NA'\n\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,\n names_file, acc_list):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n done = False\n saved_top_level = []\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n sis_spec_name 
= Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,\n acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n if i == maxi:\n return 'NA'\n done = True\n else:\n i += 1\n list_ch_remove = []\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return 'NA'\n\n\n<mask token>\n\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n for item in child_list_atup:\n child_list_a.append(item[0])\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n 
child_list_a.append(itup[0])\n return children\n\n\n<mask token>\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\n<mask token>\n\n\ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print('one og sequence choser initiating')\n if '_' in string:\n string = string.replace('_', ' ')\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print('Sisterlist')\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False\n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n if test == 'NA':\n pass\n else:\n print(test)\n return test\n while test == 'NA':\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file\n )\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,\n acc_list)\n if test 
!= 'NA':\n pass\n else:\n return test\n return test\n\n\n<mask token>\n\n\ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n result = next((True for item in acc_list if ssp_name in item), False)\n if result is True:\n print('Err in match spec name - gen list: ' + ssp_name + ' ' + item\n )\n return result\n\n\n<mask token>\n\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n names_list_acc = []\n numbers_list_acc = []\n for raw in list_raw_gene_fastas:\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind] + 1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n cutoff_num = len(list_raw_gene_fastas) / 2\n print(cutoff_num)\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n index += 1\n print(len(list_of_rem))\n list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name)\n return a\n\n\ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, 'w') as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item + '\\n')\n return acc_name\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):\n os.system(ssh_inst + \" 'mkdir Taxonomy'\")\n sb_script = script_name\n print(all_files)\n for item in all_files:\n os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')\n os.system(ssh_inst + \" 'cd ~/Taxonomy; sbatch \" + sb_script + \"'\")\n direct = os.getcwd()\n exists = 
False\n movehome = []\n finished = 'start'\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +\n direct)\n for item in all_result_files:\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = 'yes'\n else:\n finished = False\n print('Tax not done yet. could not locate : ' + item +\n 'checking again in 5 minutes')\n break\n if finished == 'yes':\n print('Should be done!')\n finished = True\n else:\n time.sleep(600)\n finished = 'yes'\n print('Taxonomy parsing complete')\n\n\ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split('.')\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split('/')[-1]\n acc_file = gsf_b + '_Acc_List.txt'\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n else:\n print(\n '....initializing all_acceptables from gene_seq_query file: ' +\n gsf_b + '. 
this should only happen once...')\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n cat_acc_dict[item] = acc_file\n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,\n cat_acc_dict, projectname)\n n = len(list_to_tax_clades)\n script_name = projectname + '_OGLScript.sh'\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n with open(results_file) as res:\n a = 0\n for line in res:\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split('~')\n print('loss candidates')\n if '' in loss_species:\n loss_species.remove('')\n if '\\n' in loss_species:\n loss_species.remove('\\n')\n item.loss_species_list = loss_species\n print(loss_species)\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print('root: ' + root_species)\n if a == 2:\n print('time:')\n print(line)\n a += 1\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n if root_species == '':\n pass\n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n\n\n<mask token>\n\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a 
= \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\" + n + \"\"\" \n\n. /etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\" + indexname + \"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, 'w') as script:\n script.write(a)\n return scriptname\n\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix + '_Species_List.txt'\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip('\"')\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix + '_OGL_Result.txt'\n return list_sp_files\n",
"step-3": "<mask token>\n\n\ndef Str_To_Taxid(string, names_file):\n found = False\n string = string.replace('_', ' ')\n with open(names_file) as names:\n for line in names:\n if '\\t' + string + '\\t' in line:\n taxid_int = re.sub('(\\\\d*)(\\t\\\\|\\t)(' + string +\n ')(\\t)(.*)', '\\\\1', line)\n found = True\n break\n if found is False:\n print('Error finding string: ' + string + ' in file: ' + names_file)\n taxid_int = 'NA'\n return taxid_int\n\n\n<mask token>\n\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid + '\\t':\n apparent_rank = re.sub('(' + taxid +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\5', line)\n apparent_rank = apparent_rank.strip()\n if '\\t' in apparent_rank:\n pass\n else:\n return apparent_rank\n return 'NA'\n\n\n<mask token>\n\n\ndef Return_Parent(taxid, nodes_file):\n len_tax = len(taxid.strip())\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid.strip() + '\\t':\n parent_taxid = re.sub('(' + taxid.strip() +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\3', line)\n if '\\t' in parent_taxid:\n pass\n else:\n return parent_taxid\n print('error finding parent taxa')\n return 'NA'\n\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,\n names_file, acc_list):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n done = False\n saved_top_level = []\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n sis_spec_name 
= Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,\n acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n if i == maxi:\n return 'NA'\n done = True\n else:\n i += 1\n list_ch_remove = []\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return 'NA'\n\n\n<mask token>\n\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n for item in child_list_atup:\n child_list_a.append(item[0])\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n 
child_list_a.append(itup[0])\n return children\n\n\ndef Ret_All_Groups_One_Rank_Below(taxid, nodes_file):\n taxid = taxid.strip()\n print('looking for taxid:' + str(taxid))\n rank = Get_Taxid_Rank(taxid, nodes_file)\n print(rank)\n target_rank = One_Rank_Lower(rank)\n if target_rank == 'NA':\n return 'NA'\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n garbage, remove_string = removal_ranks.split(target_rank)\n remove_rank_list = remove_string.split()\n children = []\n list_ch_remove = []\n child_list_a = Taxid_To_Children(taxid, nodes_file)\n done = False\n while done is False:\n for item in child_list_a:\n if item[1] == target_rank:\n children.append(item[0])\n list_ch_remove.append(item)\n if item[1] in remove_rank_list:\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_a.remove(rem)\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_b = Taxid_To_Children(parent[0], nodes_file)\n if child_list_b == []:\n pass\n else:\n for bitem in child_list_b:\n child_list_c.append(bitem)\n child_list_a = child_list_c\n return children\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\ndef Taxid_To_Name(taxid, names_file):\n found = False\n taxid = taxid.strip()\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(names_file) as names:\n for line in names:\n if line[:len_tax_t] == 
taxid + '\\t':\n name_wanted = re.sub(\n '(\\\\d*)(\\t\\\\|\\t)([^\\t]*)(\\t\\\\|\\t)(.*)(\\t\\\\|\\t)(scientific name)(.*)'\n , '\\\\3', line)\n if '\\t' in name_wanted:\n pass\n else:\n found = True\n break\n if found is False:\n print('Error finding name for: ' + taxid + ' in file: ' + names_file)\n name_wanted = 'NA'\n if found is True:\n name_wanted = name_wanted.strip()\n return name_wanted\n\n\ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print('one og sequence choser initiating')\n if '_' in string:\n string = string.replace('_', ' ')\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print('Sisterlist')\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False\n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n if test == 'NA':\n pass\n else:\n print(test)\n return test\n while test == 'NA':\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file\n )\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,\n acc_list)\n if test != 'NA':\n pass\n else:\n return test\n return test\n\n\ndef Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):\n print('Checking for reps... 
target rank is: ' + rank)\n list_of_correct_rank = []\n found = []\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n remove_string, garbage = removal_ranks.split(rank)\n remove_rank_list = remove_string.split()\n for species in species_list:\n nid = Str_To_Taxid(species, names_file)\n go = True\n while go is True:\n rp = Return_Parent(nid, nodes_file)\n if rp == 'NA':\n list_of_correct_rank.append(rp)\n go = False\n if rp.strip() == 1:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n par_rank = Get_Taxid_Rank(rp, nodes_file)\n if par_rank == rank:\n rp = rp.strip()\n list_of_correct_rank.append(rp)\n go = False\n elif par_rank in remove_rank_list:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n else:\n nid = rp\n print(tid_list)\n print(list_of_correct_rank)\n for item in tid_list:\n if item in list_of_correct_rank:\n a = tid_list.index(item)\n found.append(tid_list[a])\n return found\n\n\n<mask token>\n\n\ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n result = next((True for item in acc_list if ssp_name in item), False)\n if result is True:\n print('Err in match spec name - gen list: ' + ssp_name + ' ' + item\n )\n return result\n\n\n<mask token>\n\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n names_list_acc = []\n numbers_list_acc = []\n for raw in list_raw_gene_fastas:\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind] + 1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n cutoff_num = len(list_raw_gene_fastas) / 2\n print(cutoff_num)\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n index += 1\n print(len(list_of_rem))\n 
list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name)\n return a\n\n\ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, 'w') as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item + '\\n')\n return acc_name\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):\n os.system(ssh_inst + \" 'mkdir Taxonomy'\")\n sb_script = script_name\n print(all_files)\n for item in all_files:\n os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')\n os.system(ssh_inst + \" 'cd ~/Taxonomy; sbatch \" + sb_script + \"'\")\n direct = os.getcwd()\n exists = False\n movehome = []\n finished = 'start'\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +\n direct)\n for item in all_result_files:\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = 'yes'\n else:\n finished = False\n print('Tax not done yet. 
could not locate : ' + item +\n 'checking again in 5 minutes')\n break\n if finished == 'yes':\n print('Should be done!')\n finished = True\n else:\n time.sleep(600)\n finished = 'yes'\n print('Taxonomy parsing complete')\n\n\ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split('.')\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split('/')[-1]\n acc_file = gsf_b + '_Acc_List.txt'\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n else:\n print(\n '....initializing all_acceptables from gene_seq_query file: ' +\n gsf_b + '. 
this should only happen once...')\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n cat_acc_dict[item] = acc_file\n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,\n cat_acc_dict, projectname)\n n = len(list_to_tax_clades)\n script_name = projectname + '_OGLScript.sh'\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n with open(results_file) as res:\n a = 0\n for line in res:\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split('~')\n print('loss candidates')\n if '' in loss_species:\n loss_species.remove('')\n if '\\n' in loss_species:\n loss_species.remove('\\n')\n item.loss_species_list = loss_species\n print(loss_species)\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print('root: ' + root_species)\n if a == 2:\n print('time:')\n print(line)\n a += 1\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n if root_species == '':\n pass\n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n\n\n<mask token>\n\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a 
= \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\" + n + \"\"\" \n\n. /etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\" + indexname + \"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, 'w') as script:\n script.write(a)\n return scriptname\n\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix + '_Species_List.txt'\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip('\"')\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix + '_OGL_Result.txt'\n return list_sp_files\n",
"step-4": "<mask token>\n\n\ndef Str_To_Taxid(string, names_file):\n found = False\n string = string.replace('_', ' ')\n with open(names_file) as names:\n for line in names:\n if '\\t' + string + '\\t' in line:\n taxid_int = re.sub('(\\\\d*)(\\t\\\\|\\t)(' + string +\n ')(\\t)(.*)', '\\\\1', line)\n found = True\n break\n if found is False:\n print('Error finding string: ' + string + ' in file: ' + names_file)\n taxid_int = 'NA'\n return taxid_int\n\n\n<mask token>\n\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid + '\\t':\n apparent_rank = re.sub('(' + taxid +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\5', line)\n apparent_rank = apparent_rank.strip()\n if '\\t' in apparent_rank:\n pass\n else:\n return apparent_rank\n return 'NA'\n\n\ndef One_Rank_Lower(rank):\n print('looking one level lower than' + rank)\n if rank == 'species':\n print('is species!')\n return 'NA'\n ordered_str = (\n 'superkingdom kingdom phylum class order family genus species')\n ordered_list = ordered_str.split()\n if rank in ordered_list:\n pass\n elif rank == 'NA':\n return 'NA'\n else:\n print(rank + ' is weird')\n return 'NA'\n current = ordered_list.index(rank)\n lowindex = current + 1\n one_lower = ordered_list[lowindex]\n return one_lower\n\n\ndef Return_Parent(taxid, nodes_file):\n len_tax = len(taxid.strip())\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid.strip() + '\\t':\n parent_taxid = re.sub('(' + taxid.strip() +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\3', line)\n if '\\t' in parent_taxid:\n pass\n else:\n return parent_taxid\n print('error finding parent taxa')\n return 'NA'\n\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,\n names_file, acc_list):\n children = []\n list_ch_remove = []\n 
child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n done = False\n saved_top_level = []\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n sis_spec_name = Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,\n acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n if i == maxi:\n return 'NA'\n done = True\n else:\n i += 1\n list_ch_remove = []\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return 'NA'\n\n\n<mask token>\n\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n for item in child_list_atup:\n child_list_a.append(item[0])\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n 
child_list_a.remove(rem[0])\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return children\n\n\ndef Ret_All_Groups_One_Rank_Below(taxid, nodes_file):\n taxid = taxid.strip()\n print('looking for taxid:' + str(taxid))\n rank = Get_Taxid_Rank(taxid, nodes_file)\n print(rank)\n target_rank = One_Rank_Lower(rank)\n if target_rank == 'NA':\n return 'NA'\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n garbage, remove_string = removal_ranks.split(target_rank)\n remove_rank_list = remove_string.split()\n children = []\n list_ch_remove = []\n child_list_a = Taxid_To_Children(taxid, nodes_file)\n done = False\n while done is False:\n for item in child_list_a:\n if item[1] == target_rank:\n children.append(item[0])\n list_ch_remove.append(item)\n if item[1] in remove_rank_list:\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_a.remove(rem)\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_b = Taxid_To_Children(parent[0], nodes_file)\n if child_list_b == []:\n pass\n else:\n for bitem in child_list_b:\n child_list_c.append(bitem)\n child_list_a = child_list_c\n return children\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, 
nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\ndef Taxid_To_Name(taxid, names_file):\n found = False\n taxid = taxid.strip()\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(names_file) as names:\n for line in names:\n if line[:len_tax_t] == taxid + '\\t':\n name_wanted = re.sub(\n '(\\\\d*)(\\t\\\\|\\t)([^\\t]*)(\\t\\\\|\\t)(.*)(\\t\\\\|\\t)(scientific name)(.*)'\n , '\\\\3', line)\n if '\\t' in name_wanted:\n pass\n else:\n found = True\n break\n if found is False:\n print('Error finding name for: ' + taxid + ' in file: ' + names_file)\n name_wanted = 'NA'\n if found is True:\n name_wanted = name_wanted.strip()\n return name_wanted\n\n\ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print('one og sequence choser initiating')\n if '_' in string:\n string = string.replace('_', ' ')\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print('Sisterlist')\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False\n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n if test == 'NA':\n pass\n else:\n print(test)\n return test\n while test == 'NA':\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, 
names_file\n )\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,\n acc_list)\n if test != 'NA':\n pass\n else:\n return test\n return test\n\n\ndef Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):\n print('Checking for reps... target rank is: ' + rank)\n list_of_correct_rank = []\n found = []\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n remove_string, garbage = removal_ranks.split(rank)\n remove_rank_list = remove_string.split()\n for species in species_list:\n nid = Str_To_Taxid(species, names_file)\n go = True\n while go is True:\n rp = Return_Parent(nid, nodes_file)\n if rp == 'NA':\n list_of_correct_rank.append(rp)\n go = False\n if rp.strip() == 1:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n par_rank = Get_Taxid_Rank(rp, nodes_file)\n if par_rank == rank:\n rp = rp.strip()\n list_of_correct_rank.append(rp)\n go = False\n elif par_rank in remove_rank_list:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n else:\n nid = rp\n print(tid_list)\n print(list_of_correct_rank)\n for item in tid_list:\n if item in list_of_correct_rank:\n a = tid_list.index(item)\n found.append(tid_list[a])\n return found\n\n\ndef Choose_Loss_Candidates(string, species_list, names_file, acc_list,\n nodes_file):\n print('loss search initiating')\n if '_' in string:\n print(string)\n string = string.replace('_', ' ')\n print(string)\n taxid = Str_To_Taxid(string, names_file)\n sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)\n if sub_taxids == 'NA':\n print('Error getting loss candidates for string:' + string)\n return []\n subgroup_names = []\n for item in sub_taxids:\n subgroup_names.append(Taxid_To_Name(item, names_file))\n b = Get_Taxid_Rank(taxid, nodes_file)\n a = One_Rank_Lower(b)\n found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)\n 
print('Representatives already exist for:')\n found_names = []\n for foundtid in found:\n foundtid = foundtid.strip()\n index1 = sub_taxids.index(foundtid)\n found_names.append(subgroup_names.pop(index1))\n del sub_taxids[index1]\n print(found_names)\n print('Looking for one representative from each of the following:')\n print(subgroup_names)\n loss_list = []\n ite = 0\n for item in sub_taxids:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n print(subgroup_names[ite] + ' : ' + test)\n ite += 1\n loss_list.append(test)\n continue\n print('Loss candidates will be added:')\n na = 0\n for item in loss_list:\n if item == 'NA':\n na += 1\n while 'NA' in loss_list:\n loss_list.remove('NA')\n print(loss_list)\n print('there were ' + str(na) + ' ' + a +\n 's that no suitable loss candidate was found for.')\n return loss_list\n\n\ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n result = next((True for item in acc_list if ssp_name in item), False)\n if result is True:\n print('Err in match spec name - gen list: ' + ssp_name + ' ' + item\n )\n return result\n\n\n<mask token>\n\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n names_list_acc = []\n numbers_list_acc = []\n for raw in list_raw_gene_fastas:\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind] + 1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n cutoff_num = len(list_raw_gene_fastas) / 2\n print(cutoff_num)\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n index += 1\n print(len(list_of_rem))\n list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name)\n 
return a\n\n\ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, 'w') as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item + '\\n')\n return acc_name\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):\n os.system(ssh_inst + \" 'mkdir Taxonomy'\")\n sb_script = script_name\n print(all_files)\n for item in all_files:\n os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')\n os.system(ssh_inst + \" 'cd ~/Taxonomy; sbatch \" + sb_script + \"'\")\n direct = os.getcwd()\n exists = False\n movehome = []\n finished = 'start'\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +\n direct)\n for item in all_result_files:\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = 'yes'\n else:\n finished = False\n print('Tax not done yet. 
could not locate : ' + item +\n 'checking again in 5 minutes')\n break\n if finished == 'yes':\n print('Should be done!')\n finished = True\n else:\n time.sleep(600)\n finished = 'yes'\n print('Taxonomy parsing complete')\n\n\ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split('.')\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split('/')[-1]\n acc_file = gsf_b + '_Acc_List.txt'\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n else:\n print(\n '....initializing all_acceptables from gene_seq_query file: ' +\n gsf_b + '. 
this should only happen once...')\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n cat_acc_dict[item] = acc_file\n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,\n cat_acc_dict, projectname)\n n = len(list_to_tax_clades)\n script_name = projectname + '_OGLScript.sh'\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n with open(results_file) as res:\n a = 0\n for line in res:\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split('~')\n print('loss candidates')\n if '' in loss_species:\n loss_species.remove('')\n if '\\n' in loss_species:\n loss_species.remove('\\n')\n item.loss_species_list = loss_species\n print(loss_species)\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print('root: ' + root_species)\n if a == 2:\n print('time:')\n print(line)\n a += 1\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n if root_species == '':\n pass\n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n\n\ndef Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):\n corr_file_name = 
'Corr_' + projectname + '.txt'\n results_list = []\n with open(corr_file_name, 'w') as corr:\n for n in range(len(list_of_clades)):\n corr.write(str(n + 1) + ' ' + list_of_clades[n].species_file +\n ' ' + list_of_clades[n].string_name + ' ' + cat_acc_dict[\n list_of_clades[n].cat_file] + ' ' + list_of_clades[n].\n result + '\\n')\n results_list.append(list_of_clades[n].result)\n return corr_file_name, results_list\n\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a = \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\" + n + \"\"\" \n\n. /etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\" + indexname + \"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, 'w') as script:\n script.write(a)\n return scriptname\n\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix + '_Species_List.txt'\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip('\"')\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix + '_OGL_Result.txt'\n return list_sp_files\n",
"step-5": "# #!/usr/bin/python\n\n# last edit abigailc@Actaeon on jan 27 2017\n\n#pulling the taxonomy functions out of makespeciestree because I need to make them faster...\n#insects is running for literally >20 hours.\n\n\nnames_file = \"/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/names.dmp\"\nnodes_file = \"/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/nodes.dmp\"\n\n\n\n######### PERSONAL_SETTINGS #########\nssh_inst = \"ssh -l abigailc -i ~/.ssh/id_rsa eofe4.mit.edu\"\nclus_head = \"abigailc@eofe4.mit.edu:/home/abigailc/\"\nPath_Blast = \"/Users/abigailc/blast/\"\n\nimport os\nimport re\nimport time\nimport sys\n#from oxy_mods.Classes_DTL_Detector import Fasta\n\n#BASIC OPERATIONS\ndef Str_To_Taxid(string, names_file):\n #init done\n #turns a string to its taxon id NCBI\n #this is easier than expected. just open names.dmp and find the first hit. format:\n found = False\n #print(\"strtotaxid\")\n #print(string+\" str to taxid\")\n string = string.replace(\"_\", \" \")\n #print(string)\n with open (names_file) as names:\n for line in names:\n \n if \"\\t\"+string+\"\\t\" in line:\n #print(\"got:\"+line)\n taxid_int = re.sub (\"(\\d*)(\\t\\|\\t)(\"+string+\")(\\t)(.*)\", \"\\\\1\", line)\n found = True\n break\n if found is False:\n print(\"Error finding string: \"+string+\" in file: \"+names_file)\n taxid_int = \"NA\"\n return taxid_int\n\ndef Taxid_To_Children(taxid, nodes_file):\n\n #goes one level deeper. 
finds all taxids that list the given taxid as \"parent\", returns as a list\n childlist = []\n child_rank_list = []\n with open (nodes_file) as nodes:\n for line in nodes:\n if \"\\t\"+taxid+\"\\t\" in line:\n #print(\"gotcha\")\n #print(line)\n #the thing matches, do the re.sub.\n #includes the tab bc otherwise taxid 12 would match 12, 123, 12345355, etc\n baby_taxid_rank = re.sub(\"(\\d*)(\\t\\|\\t)(\"+taxid+\")(\\t\\|\\t)([a-z]*)(.*)\", \"\\\\1~\\\\5\", line)\n if \"\\t\" in baby_taxid_rank:\n #this happens if the re.sub does not occur - eg if \\ttaxid\\t occured somewhere in the line other than where it should've. \n pass\n else:\n baby_taxid, baby_rank = baby_taxid_rank.split(\"~\")\n #add to list of bbys\n baby_taxid = baby_taxid.strip()\n baby_rank = baby_rank.strip()\n childlist.append(baby_taxid)\n child_rank_list.append((baby_taxid, baby_rank))\n return child_rank_list\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax+1\n #given taxid = 100, len_tax = 3, len_tax_t = 5\n with open (nodes_file) as nodes:\n for line in nodes:\n #print(line[:len_tax_t])\n #print(taxid+\"\\t\")\n if line[:len_tax_t] == taxid+\"\\t\":\n #the thing matches, do the re.sub.\n #includes the tab bc otherwise taxid 12 would match 12, 123, 12345355, etc\n apparent_rank = re.sub(\"(\"+taxid+\")(\\t\\|\\t)(\\d*)(\\t\\|\\t)([a-z]*)(.*)\", \"\\\\5\", line)\n apparent_rank = apparent_rank.strip()\n if \"\\t\" in apparent_rank:\n pass\n else:\n return apparent_rank\n return \"NA\"\n #returns the rank (eg, \"order\" of a taxid\")\n\ndef One_Rank_Lower(rank):\n print(\"looking one level lower than\"+rank)\n if rank == \"species\":\n print(\"is species!\")\n return \"NA\"\n ordered_str = \"superkingdom kingdom phylum class order family genus species\"\n ordered_list = ordered_str.split()\n if rank in ordered_list:\n pass\n elif rank == \"NA\":\n return \"NA\"\n else:\n print(rank+\" is weird\")\n return \"NA\"\n 
current = ordered_list.index(rank)\n lowindex = current + 1\n one_lower = ordered_list[lowindex]\n return one_lower\n \n #given phylum, returns class. given class, return order. etc.\n\n# rank = \"class\"\n# string = \"cyanobacteria\"\n# taxid = \"12345\"\n\ndef Return_Parent(taxid, nodes_file):\n #eg for a given rank taxid, find it's up-one-level (not rank) taxid, and return it.\n len_tax = len(taxid.strip())\n len_tax_t = len_tax+1\n #given taxid = 100, len_tax = 3, len_tax_t = 5\n #print(\"searching for one level above taxid:\"+str(taxid))\n #print(\"tiud: \"+taxid)\n with open (nodes_file) as nodes:\n for line in nodes:\n #print(taxid.strip()+\"\\t\")\n #print(line[:len_tax_t])\n if line[:len_tax_t] == taxid.strip()+\"\\t\":\n \n # print(\"got: \"+line)\n #the thing matches, do the re.sub.\n #includes the tab bc otherwise taxid 12 would match 12, 123, 12345355, etc\n parent_taxid = re.sub(\"(\"+taxid.strip()+\")(\\t\\|\\t)(\\d*)(\\t\\|\\t)([a-z]*)(.*)\", \"\\\\3\", line)\n #print(parent_taxid)\n if \"\\t\" in parent_taxid:\n pass\n else:\n return parent_taxid\n print(\"error finding parent taxa\")\n return(\"NA\")\n\n\n#COMPLEX OPERATIONS\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file, names_file, acc_list):\n children = []\n list_ch_remove = []\n child_list_a = []\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n #this is a list of children TAXIDS ONLY\n\n #print(\"initial pass\")\n #print(child_list_atup)\n #print(child_list_a)\n done = False\n saved_top_level = []\n #we're going to do one at a time, so save all, and load them one-by-one.\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n # print(\"maxi: \"+str(maxi))\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n #also lets implement a 
saved second level... for further spe.\n while done is False:\n for item in child_list_atup:\n if item[1] == \"species\":\n #add the taxid to the list of species_level_children\n children.append(item[0])\n sis_spec_name = Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name, acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n #remove taxids that were saved at the species level\n #print(list_ch_remove)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n #if all tips have terminated at the species level: you are done.\n if child_list_a == []:\n if i == maxi:\n #print(\"found none\")\n return \"NA\"\n done = True\n else:\n i += 1\n #print(i)\n list_ch_remove = []\n \n atup = saved_top_level[0]\n #print(atup)\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n #print(child_list_atup)\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n #print(\"New parent list:\")\n #print(child_list_atup)\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n #print(child_list_a)\n #children is a list of all species-level TAXIDS that belong to the given group. 
\n return \"NA\"\n\n#WHY ARE THERE TWO OF THESE???????\ndef Ret_A_Valid_Species_Below(taxid, nodes_file, names_file, acc_list):\n masterlist = []\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n complete = False\n masterlist.append([(taxid, \"starter\")])\n while complete is False:\n #print(masterlist)\n if masterlist == []:\n return(\"NA\")\n #now lookat is the last member of the last list in masterlist.\n now_list = masterlist[-1]\n if now_list == []:\n while [] in masterlist: \n masterlist.remove([])\n if masterlist == []:\n return(\"NA\")\n now_list = masterlist[-1]\n #lookat first member of that list.\n now_tup = now_list[0]\n now_taxid, now_rank = now_tup[0], now_tup[1]\n #see if its a species\n if now_rank == \"species\":\n now_list.remove(now_tup)\n now_name = Taxid_To_Name(now_taxid, names_file)\n if now_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(now_name,acc_list)\n if in_blast is True:\n #now_name is a species_name\n return now_name\n #check if now_tup is valid. 
if so, return.\n else:\n now_list.remove(now_tup)\n #generate a new list - of the descendents of this one.\n newlist = Taxid_To_Children(now_taxid, nodes_file)\n #print(newlist)\n if newlist == \"NA\":\n pass\n else:\n #add it to masterlist.\n masterlist.append(newlist)\n return(\"Uh, what?\")\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n #this is a list of children TAXIDS ONLY\n for item in child_list_atup:\n child_list_a.append(item[0])\n #print(\"initial pass\")\n #print(child_list_atup)\n #print(child_list_a)\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == \"species\":\n #add the taxid to the list of species_level_children\n children.append(item[0])\n list_ch_remove.append(item)\n #remove taxids that were saved at the species level\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n #if all tips have terminated at the species level: you are done.\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n #for remaining non-species level taxids in lista:\n # -get their children (listb)\n # -add their children to a persistant list(listc)\n # -then set lista(the list to check and remove species-level-entries) to be == listc.\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n #print(\"New parent list:\")\n #print(child_list_atup)\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n #print(child_list_a)\n #children is a list of all species-level TAXIDS that 
belong to the given group. \n return children\n\n\ndef Ret_All_Groups_One_Rank_Below(taxid, nodes_file):\n taxid = taxid.strip()\n print(\"looking for taxid:\"+str(taxid))\n rank = Get_Taxid_Rank(taxid, nodes_file)\n print(rank)\n #raise SystemExit\n target_rank = One_Rank_Lower(rank)\n if target_rank == \"NA\":\n return(\"NA\")\n removal_ranks = \"superkingdom kingdom phylum class order family genus species\"\n garbage, remove_string = removal_ranks.split(target_rank)\n remove_rank_list = remove_string.split()\n children = []\n list_ch_remove = []\n #print(remove_rank_list)\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n child_list_a = Taxid_To_Children(taxid, nodes_file)\n done = False\n while done is False:\n for item in child_list_a:\n if item[1] == target_rank:\n #add the taxid to the list of species_level_children\n children.append(item[0])\n list_ch_remove.append(item)\n if item[1] in remove_rank_list:\n list_ch_remove.append(item)\n #remove taxids that were saved at the species level\n for rem in list_ch_remove:\n child_list_a.remove(rem)\n #if all tips have terminated at the target species level: you are done.\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n #for remaining non-species level taxids in lista:\n # -get their children (listb)\n # -add their children to a persistant list(listc)\n # -then set lista(the list to check and remove species-level-entries) to be == listc.\n for parent in child_list_a:\n child_list_b = Taxid_To_Children(parent[0], nodes_file)\n if child_list_b == []:\n pass\n else:\n for bitem in child_list_b:\n child_list_c.append(bitem)\n child_list_a = child_list_c\n #print(child_list_a)\n #children is a list of all ONE-RANK-BELOW level TAXIDS that belong to the given group. \n return children\n #runs until all children are found of one rank below. 
eg (CLASS -> [order1, order 2, order3, order 4)\n #for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. hopefully checking that we have data for the chosen species.\n\n \n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n #from str rank - get current taxid, go up one level, then get all descendents in a list, remove the current taxid, and return the resulting sister list\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n #sis_and_self is a list of TAXIDS ONLY\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n#sisterlist will be a list of taxids for the sister clades to the current thing. 
by level, not by rank.\n#todo = implement something to redo if sisterlist is empty.\n\ndef Taxid_To_Name(taxid, names_file):\n #this needs to be the backwards version of Str to Taxid.\n found = False\n taxid = taxid.strip()\n len_tax = len(taxid)\n len_tax_t = len_tax+1\n with open (names_file) as names:\n for line in names:\n if line[:len_tax_t] == taxid+\"\\t\":\n # print(\"got here\")\n name_wanted = re.sub (\"(\\d*)(\\t\\|\\t)([^\\t]*)(\\t\\|\\t)(.*)(\\t\\|\\t)(scientific name)(.*)\", \"\\\\3\", line)\n if \"\\t\" in name_wanted:\n pass\n else:\n found = True\n break\n if found is False:\n print(\"Error finding name for: \"+taxid+\" in file: \"+names_file)\n name_wanted = \"NA\"\n if found is True:\n #print(name_wanted)\n name_wanted = name_wanted.strip()\n return name_wanted\n \ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print(\"one og sequence choser initiating\")\n if \"_\" in string:\n string = string.replace(\"_\", \" \")\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print(\"Sisterlist\")\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False \n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n #spec_sis_list = Ret_All_Species_Below(item, nodes_file)\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)\n if test == \"NA\":\n pass\n else:\n print(test)\n return test\n #if test == \"None\":\n # return \"None\"\n #if nothing in the first level sister list is a valid hit, keep moving up the tree until you get one.\n\n while test == \"NA\":\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, 
nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)\n if test != \"NA\":\n pass\n else:\n return test\n return test\n \n #print (spec_sis_list)\n #for sis_spec_taxid in spec_sis_list:\n # sis_spec_name = Taxid_To_Name(sis_spec_taxid, names_file)\n # in_blast = Check_Spec_Name_Blast_File(sis_spec_name, blast_file)\n # if in_blast is True:\n # print(\"Outgroup sequence chosen:\"+sis_spec_name)\n # return sis_spec_name\n\n \n\n #double break so we only keep ONE sequence.\n #go all the way down the first one until you get a species-level entry.\n #check if the species-level entry is found in your .blast file (if that is where we are implementing this??? )\n #if not, continue... check each species-level thing you find.\n #this would then need to be included in make_species_trees... and only called if the request is sent directly from Parser_blah_master.\ndef Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):\n print(\"Checking for reps... 
target rank is: \"+rank)\n list_of_correct_rank = []\n found = []\n removal_ranks = \"superkingdom kingdom phylum class order family genus species\"\n remove_string, garbage = removal_ranks.split(rank)\n remove_rank_list = remove_string.split()\n for species in species_list:\n nid = Str_To_Taxid(species, names_file)\n #go up the ladder\n go = True\n while go is True:\n #get parent taxid\n rp = Return_Parent(nid, nodes_file)\n #if its 1, we're done.\n if rp == \"NA\":\n list_of_correct_rank.append(rp)\n go = False\n if rp.strip() == 1:\n rp = \"NA\"\n list_of_correct_rank.append(rp)\n go = False\n #get rank for that new taxid\n par_rank = Get_Taxid_Rank(rp, nodes_file)\n #if it's what we want it to be, add to list.\n if par_rank == rank:\n rp = rp.strip()\n list_of_correct_rank.append(rp)\n go = False\n #if its a step too high, terminate - we went too far somehow\n elif par_rank in remove_rank_list:\n rp = \"NA\"\n list_of_correct_rank.append(rp)\n go = False\n #else, go up another level and test that one!\n else:\n nid = rp\n print(tid_list)\n print(list_of_correct_rank)\n for item in tid_list:\n if item in list_of_correct_rank:\n a = tid_list.index(item)\n found.append(tid_list[a])\n return found\n\n#@blast_file should actually be a list of raw_blast_FASTA objects\ndef Choose_Loss_Candidates(string, species_list, names_file, acc_list, nodes_file):\n print(\"loss search initiating\")\n if \"_\" in string:\n print(string)\n string = string.replace(\"_\", \" \")\n print(string)\n taxid = Str_To_Taxid(string, names_file)\n #for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. 
hopefully checking that we have data for the chosen species.\n sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)\n if sub_taxids == \"NA\":\n print(\"Error getting loss candidates for string:\"+string)\n return([])\n subgroup_names = []\n for item in sub_taxids:\n subgroup_names.append(Taxid_To_Name(item, names_file))\n b = Get_Taxid_Rank(taxid, nodes_file)\n a = One_Rank_Lower(b) \n found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)\n print(\"Representatives already exist for:\")\n found_names = []\n for foundtid in found:\n foundtid = foundtid.strip()\n index1 = sub_taxids.index(foundtid)\n found_names.append(subgroup_names.pop(index1))\n del sub_taxids[index1]\n print(found_names)\n print(\"Looking for one representative from each of the following:\")\n print(subgroup_names)\n loss_list = []\n ite = 0\n # #first check if it is in the output loss list.\n # for item in sub_taxids:\n # with open(saved_loss_candidates) as saved:\n # for line in saved:\n # if item in line:\n # #newthing will be a species name.\n # newthing = re.sub(\"(\"item\")(\\t)(.*)\", \"\\\\3\", line))\n # loss_list.append(newthing)\n # found2.append(item)\n # break\n #remove those found from file from the search list.\n # for item in found2:\n # sub_taxids.pop(item)\n for item in sub_taxids:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)\n #print(test)\n print(subgroup_names[ite]+\" : \"+test)\n ite+=1\n loss_list.append(test)\n continue\n print(\"Loss candidates will be added:\")\n na = 0\n for item in loss_list:\n if item == \"NA\":\n na +=1\n while \"NA\" in loss_list: loss_list.remove(\"NA\")\n \n print(loss_list)\n print(\"there were \"+str(na)+\" \"+a+\"s that no suitable loss candidate was found for.\")\n return loss_list\n #either one per next-level-down\n #or one per next-rank-down\n \ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n \n result = next((True for item in 
acc_list if ssp_name in item), False)\n if result is True:\n print(\"Err in match spec name - gen list: \"+ ssp_name +\" \"+ item)\n return result\n\n \n \ndef Check_Spec_Name_Blast_File(ssp_name, blast_fasta_list):\n lf = (len(blast_fasta_list))\n half = lf/2\n yes = 0\n att = 0\n #print(\"Checking :\"+ssp_name)\n ssp_name = ssp_name.replace(\" \", \"_\")\n ssp_name = ssp_name.strip()\n for current_blast in blast_fasta_list:\n att += 1\n if att > 6:\n if yes < att/3:\n return False\n if ssp_name in current_blast.species_names:\n \n yes += 1\n continue\n else:\n \n #print(ssp_name)\n #print(current_blast.species_names[0])\n for spec in current_blast.species_names:\n if ssp_name in spec:\n yes +=1\n break\n continue\n #print(yes)\n #print(half)\n if yes > half:\n #print(\"validated: \"+ssp_name)\n return True\n else:\n return False\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n #this is printing an empty file. why?\n names_list_acc = []\n numbers_list_acc = []\n \n for raw in list_raw_gene_fastas:\n #do they have species lists?\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind]+1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n #the numbers list can specify a cut off that is necesary for the thing being acceptable\n #for now let's be consistant and use 1/2 of lsit of raw fastas?\n cutoff_num = (len(list_raw_gene_fastas)/2)\n print(cutoff_num)\n #this will be 15 currently. might be .5 sometimes.\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n #means that we dont care if its a decimal or not. 1 will pass .5\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n #add the index to be removed to a list. 
index into names and numbers should be identicle\n index +=1\n print(len(list_of_rem))\n list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n #uhhhhh i think we need to sort the numbers so removal of the largest number happens first so as to not fuck up list order.\n #sorting now. should be good.\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name) \n return a\n \ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, \"w\") as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item+\"\\n\")\n return acc_name\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, \"w\") as spc_list_file:\n for item in spc_list:\n #stripiing strain data from this version of the species_list such that it will \n if \"_\" in item:\n dash_sep = item.split(\"_\")\n item = dash_sep[0]+\"_\"+dash_sep[1]\n spc_list_file.write(item+\"\\n\")\n return spcname\n\n#parser stuff\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name,all_files, all_result_files):\n #here acc list is the name of the acc_list_current_file\n #auto gen an sbatch script\n os.system(ssh_inst+\" \\'mkdir Taxonomy\\'\")\n sb_script = script_name\n #scp it over\n \n print(all_files)\n for item in all_files:\n os.system(\"scp \"+item+\" \"+clus_head+\"Taxonomy\")\n #run it\n\n #edit the script on the cluster to deal with my mistakes\n\n os.system(ssh_inst+\" 'cd ~/Taxonomy; sbatch \"+sb_script+\"'\")\n #scp it back and verify\n direct = os.getcwd()\n exists = False\n #now it should exist locally\n movehome = []\n finished = \"start\"\n #bring home the d\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system(\"scp \"+clus_head+\"Taxonomy/\"+filename+\" \"+direct)\n for item in all_result_files:\n #see if it got moved home.\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = \"yes\"\n else:\n finished = False\n print(\"Tax not done yet. 
could not locate : \"+item+\"checking again in 5 minutes\")\n break\n if finished == \"yes\":\n print(\"Should be done!\")\n finished = True\n else:\n #wait ten minutes and then try again.\n time.sleep(600)\n finished = \"yes\"\n#TEMPORARILY REMOVED result file deletion from the cluster to make testing progress faster.\n #for item in all_result_files:\n # os.system(ssh_inst+\" 'cd ~/Taxonomy; rm \"+item+\"'\")\n #for item in all_files:\n # os.system(ssh_inst+\" 'cd ~/Taxonomy; rm \"+item+\"'\")\n print(\"Taxonomy parsing complete\")\n #remove the script and the og loss file from cluster\n\n\n\n\n \ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n #the acceptable list should be a list of taxa that are present in at least 50% (?) of the blast hit files for the genes given.\n\n #get all gene-query-files to look at\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n\n #for each, create an acceptable list output name\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split(\".\")\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split(\"/\")[-1]\n acc_file = gsf_b+\"_Acc_List.txt\"\n #print(\"Looking for loss-candidates and a rooting sequence to add....\")\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n #if not already done, actually make the output acceptable list.\n else:\n print(\"....initializing all_acceptables from gene_seq_query file: \"+gsf_b+\". this should only happen once...\")\n #generate it\n #should be passing in A LIST OF ALL THE BLAST_FILES ASSOCIATED WITH THE GENE. eg the things in Raw_Blasts that were consulted.\n #are these stored in each subtree? 
should pass a list of fasta objects.\n #ist_raw_objects = []\n #rint(list_raws)\n #or raw in list_raws:\n # print(raw.name)\n\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n #this is returning \"NONE\" which is super not okay.\n cat_acc_dict[item] = acc_file\n \n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n\n #check if we already ran the taxonomy and have data downloaded. (this is mostly for while fixing errors; i keep getting stuck at this point & ity is a waste of time to re-run the taxonomy parser.\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n #sets species_file and result to each subtree.\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades, cat_acc_dict, projectname)\n #makes the correlation file.\n #for each clade, generate a species_list, result name, acc_file_name, string_name and print them all to a corr.file\n n = len(list_to_tax_clades)\n #gen the script\n script_name = projectname+\"_OGLScript.sh\"\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile,all_files, results_list)\n \n #run the script\n\n #add loss_species, root_species to each subtree as a value and also add them to the species_list going forward.\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n #open the file and get loss and species results.\n with open(results_file) as res:\n # print(\"opened\")\n a=0\n for line in res:\n #get loss results\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split(\"~\")\n print(\"loss 
candidates\")\n if \"\" in loss_species:\n loss_species.remove (\"\")\n if \"\\n\" in loss_species:\n loss_species.remove(\"\\n\")\n item.loss_species_list = loss_species \n print(loss_species)\n #get root results\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print(\"root: \"+root_species)\n #get how long it took\n if a == 2:\n print(\"time:\")\n print(line)\n a += 1\n #if no loss, do nothing\n\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n #else, add them to the species list, and also track them(?)\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n \n \n if root_species == \"\":\n pass\n \n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n# os.system(\"rm \"+results_file)\n\n #done\n\ndef Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):\n \n corr_file_name = \"Corr_\"+projectname+\".txt\"\n results_list = []\n with open(corr_file_name, \"w\") as corr:\n for n in range(len(list_of_clades)):\n corr.write(str(n+1)+\" \"+list_of_clades[n].species_file+\" \"+list_of_clades[n].string_name+\" \"+cat_acc_dict[list_of_clades[n].cat_file]+\" \"+list_of_clades[n].result+\"\\n\")\n results_list.append(list_of_clades[n].result)\n return corr_file_name, results_list\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a = \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\"+n+\"\"\" \n\n. 
/etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\"+indexname+\"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, \"w\") as script:\n script.write(a)\n return scriptname\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix+\"_Species_List.txt\"\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip(\"\\\"\")\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix+\"_OGL_Result.txt\"\n return list_sp_files\n\n",
"step-ids": [
2,
15,
18,
21,
27
]
}
|
[
2,
15,
18,
21,
27
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_p():
import inspect
import re
local_vars = inspect.currentframe().f_back.f_locals
return len(re.findall('p\\s*=\\s*0', str(local_vars))) == 0
<|reserved_special_token_1|>
def check_orthogonal(u, v):
return u.dot(v) == 0
def check_p():
import inspect
import re
local_vars = inspect.currentframe().f_back.f_locals
return len(re.findall('p\\s*=\\s*0', str(local_vars))) == 0
<|reserved_special_token_1|>
def check_orthogonal(u, v):
return u.dot(v) == 0
def check_p():
import inspect
import re
local_vars = inspect.currentframe().f_back.f_locals
return len(re.findall("p\\s*=\\s*0", str(local_vars))) == 0
|
flexible
|
{
"blob_id": "36e538ca7fbdbf6e2e6ca1ae126e4e75940bb5cd",
"index": 4316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_p():\n import inspect\n import re\n local_vars = inspect.currentframe().f_back.f_locals\n return len(re.findall('p\\\\s*=\\\\s*0', str(local_vars))) == 0\n",
"step-3": "def check_orthogonal(u, v):\n return u.dot(v) == 0\n\n\ndef check_p():\n import inspect\n import re\n local_vars = inspect.currentframe().f_back.f_locals\n return len(re.findall('p\\\\s*=\\\\s*0', str(local_vars))) == 0\n",
"step-4": "def check_orthogonal(u, v):\n return u.dot(v) == 0\n\n\ndef check_p():\n import inspect\n import re\n local_vars = inspect.currentframe().f_back.f_locals\n return len(re.findall(\"p\\\\s*=\\\\s*0\", str(local_vars))) == 0\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
n = 5
a = '1'
if n == 1:
print(a)
else:
for i in range(2, n + 1):
if i == 2:
a = '11'
else:
count = 1
for j in range(len(a) - 1):
if j == len(a) - 2 :
if a[j] == a[j + 1]:
count += 1
a = a + count + a[j]
else:
elif a[j] == a[j + 1]:
count += 1
print(a)
else:
a = a + count + a[j]
count = 1
print(a)
|
normal
|
{
"blob_id": "26a778f16cc50d1a8791fb672fb8907464865f3f",
"index": 1349,
"step-1": "n = 5\na = '1'\nif n == 1:\n print(a)\nelse:\n for i in range(2, n + 1):\n if i == 2:\n a = '11'\n else:\n count = 1\n for j in range(len(a) - 1):\n if j == len(a) - 2 :\n if a[j] == a[j + 1]:\n count += 1\n a = a + count + a[j]\n else:\n\n elif a[j] == a[j + 1]:\n count += 1\n print(a)\n else:\n a = a + count + a[j]\n count = 1\n print(a)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# This defines a new interface, called MyClosedInterface
# which is closed (does not allow new members to be added).
# "eci" is the schema id for this extension.
{"fs": { "eci": {
"info": {
"name": "Example closed Interface extension",
"version": "1.0",
"date": "Sept. 22, 2016",
"author": "Jeff Teeters",
"contact": "jteeters@berkeley.edu",
"description": ("Extension defining a new closed Interface")
},
"schema": {
"MyClosedInterface/": {
"merge": ["core:<Interface>/"],
"description": ("A new interface defined in extension e-closed-interface.py."
" This is closed (no new members can be added)."),
"_properties": {"closed": True}, # specify that this group is closed (no new members can be added).
"attributes": {
"foo": {
"description": "example text attributed for MyClosedInterface",
"data_type": "text"}},
"bar": {
"description": ("Example dataset included with MyClosedInterface"),
"data_type": "int",
"dimensions": ["num_measurements"]},
"bazc/": {
"description": ("Example closed group in MyClosedInterface"),
# "_closed": True,
"_properties": {"closed": True}},
"bazo/": {
"description": ("Example open group in MyClosedInterface"),
# "_closed": False,
"_properties": {"closed": False}}
}
}
}}}
|
normal
|
{
"blob_id": "892f90edbd8bd54841b815a6bc29d136c5e84a38",
"index": 7175,
"step-1": "<mask token>\n",
"step-2": "{'fs': {'eci': {'info': {'name': 'Example closed Interface extension',\n 'version': '1.0', 'date': 'Sept. 22, 2016', 'author': 'Jeff Teeters',\n 'contact': 'jteeters@berkeley.edu', 'description':\n 'Extension defining a new closed Interface'}, 'schema': {\n 'MyClosedInterface/': {'merge': ['core:<Interface>/'], 'description':\n 'A new interface defined in extension e-closed-interface.py. This is closed (no new members can be added).'\n , '_properties': {'closed': True}, 'attributes': {'foo': {'description':\n 'example text attributed for MyClosedInterface', 'data_type': 'text'}},\n 'bar': {'description':\n 'Example dataset included with MyClosedInterface', 'data_type': 'int',\n 'dimensions': ['num_measurements']}, 'bazc/': {'description':\n 'Example closed group in MyClosedInterface', '_properties': {'closed': \n True}}, 'bazo/': {'description':\n 'Example open group in MyClosedInterface', '_properties': {'closed': \n False}}}}}}}\n",
"step-3": "# This defines a new interface, called MyClosedInterface\n# which is closed (does not allow new members to be added).\n\n# \"eci\" is the schema id for this extension.\n\n{\"fs\": { \"eci\": {\n\n\"info\": {\n \"name\": \"Example closed Interface extension\",\n \"version\": \"1.0\",\n \"date\": \"Sept. 22, 2016\",\n \"author\": \"Jeff Teeters\",\n \"contact\": \"jteeters@berkeley.edu\",\n \"description\": (\"Extension defining a new closed Interface\")\n },\n \n\"schema\": {\n \"MyClosedInterface/\": {\n \"merge\": [\"core:<Interface>/\"],\n \"description\": (\"A new interface defined in extension e-closed-interface.py.\"\n \" This is closed (no new members can be added).\"),\n \"_properties\": {\"closed\": True}, # specify that this group is closed (no new members can be added).\n \"attributes\": {\n \"foo\": {\n \"description\": \"example text attributed for MyClosedInterface\",\n \"data_type\": \"text\"}},\n \"bar\": {\n \"description\": (\"Example dataset included with MyClosedInterface\"),\n \"data_type\": \"int\",\n \"dimensions\": [\"num_measurements\"]},\n \"bazc/\": {\n \"description\": (\"Example closed group in MyClosedInterface\"),\n # \"_closed\": True,\n \"_properties\": {\"closed\": True}},\n \"bazo/\": {\n \"description\": (\"Example open group in MyClosedInterface\"),\n # \"_closed\": False,\n \"_properties\": {\"closed\": False}}\n }\n}\n\n}}}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf8
from __future__ import absolute_import
import numpy as np
def arr2str(arr, sep=", ", fmt="{}"):
"""
Make a string from a list seperated by ``sep`` and each item formatted
with ``fmt``.
"""
return sep.join([fmt.format(v) for v in arr])
def indent_wrap(s, indent=0, wrap=80):
"""
Wraps and indents a string ``s``.
Parameters
----------
s : str
The string to wrap.
indent : int
How far to indent each new line.
wrape : int
Number of character after which to wrap the string.
Returns
-------
s : str
Indented and wrapped string, each line has length ``wrap``, except the
last one, which may have less than ``wrap`` characters.
Example
-------
>>> s = 2 * "abcdefghijklmnopqrstuvwxyz"
>>> indent_wrap(s, indent=0, wrap=26)
'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'
>>> indent_wrap(s, indent=2, wrap=26)
' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'
"""
split = wrap - indent
chunks = [indent * " " + s[i:i + split] for i in range(0, len(s), split)]
return "\n".join(chunks)
def serialize_ndarrays(d):
"""
Recursively traverse through iterable object ``d`` and convert all occuring
ndarrays to lists to make it JSON serializable.
Note: Works for 1D dicts with ndarrays at first level. Certainly not tested
and meant to work for all use cases.
Made with code from: http://code.activestate.com/recipes/577504/
Parameters
----------
d : iterable
Can be dict, list, set, tuple or frozenset.
Returns
-------
d : iterable
Same as input, but all ndarrays replaced by lists.
"""
def dict_handler(d):
return d.items()
handlers = {list: enumerate, tuple: enumerate,
set: enumerate, frozenset: enumerate,
dict: dict_handler}
def serialize(o):
for typ, handler in handlers.items():
if isinstance(o, typ):
for key, val in handler(o):
if isinstance(val, np.ndarray):
o[key] = val.tolist()
else:
o[key] = serialize_ndarrays(o[key])
return o
return serialize(d)
def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):
"""
Populate dictionary with data from a given dict ``d``, and check if ``d``
has required and optional keys. Set optionals with default if not present.
If input ``d`` is None and ``required_keys`` is empty, just return
``opt_keys``.
Parameters
----------
d : dict or None
Input dictionary containing the data to be checked. If is ``None``, then
a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a
``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is
not, then a ``ValueError`` israised.
required_keys : list or None, optional
Keys that must be present and set in ``d``. (default: None)
opt_keys : dict or None, optional
Keys that are optional. ``opt_keys`` provides optional keys and default
values ``d`` is filled with if not present in ``d``. (default: None)
noleft : bool, optional
If True, raises a ``KeyError``, when ``d`` contains etxra keys, other
than those given in ``required_keys`` and ``opt_keys``. (default: True)
Returns
-------
out : dict
Contains all required and optional keys, using default values, where
optional keys were missing. If ``d`` was None, a copy of ``opt_keys`` is
returned, if ``opt_keys`` was not ``None``.
"""
if required_keys is None:
required_keys = []
if opt_keys is None:
opt_keys = {}
if d is None:
if not required_keys:
if opt_keys is None:
raise TypeError("`d` and òpt_keys` are both None.")
return opt_keys.copy()
else:
raise ValueError("`d` is None, but `required_keys` is not empty.")
d = d.copy()
out = {}
# Set required keys
for key in required_keys:
if key in d:
out[key] = d.pop(key)
else:
raise KeyError("Dict is missing required key '{}'.".format(key))
# Set optional values, if key not given
for key, val in opt_keys.items():
out[key] = d.pop(key, val)
# Complain when extra keys are left and noleft is True
if d and noleft:
raise KeyError("Leftover keys ['{}'].".format(
"', '".join(list(d.keys()))))
return out
|
normal
|
{
"blob_id": "3b4799f43ec497978bea3ac7ecf8c6aaeb2180b4",
"index": 3867,
"step-1": "<mask token>\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef arr2str(arr, sep=', ', fmt='{}'):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef arr2str(arr, sep=', ', fmt='{}'):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\ndef fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n \"\"\"\n Populate dictionary with data from a given dict ``d``, and check if ``d``\n has required and optional keys. Set optionals with default if not present.\n\n If input ``d`` is None and ``required_keys`` is empty, just return\n ``opt_keys``.\n\n Parameters\n ----------\n d : dict or None\n Input dictionary containing the data to be checked. If is ``None``, then\n a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a\n ``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is\n not, then a ``ValueError`` israised.\n required_keys : list or None, optional\n Keys that must be present and set in ``d``. (default: None)\n opt_keys : dict or None, optional\n Keys that are optional. ``opt_keys`` provides optional keys and default\n values ``d`` is filled with if not present in ``d``. (default: None)\n noleft : bool, optional\n If True, raises a ``KeyError``, when ``d`` contains etxra keys, other\n than those given in ``required_keys`` and ``opt_keys``. (default: True)\n\n Returns\n -------\n out : dict\n Contains all required and optional keys, using default values, where\n optional keys were missing. 
If ``d`` was None, a copy of ``opt_keys`` is\n returned, if ``opt_keys`` was not ``None``.\n \"\"\"\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError('`d` and òpt_keys` are both None.')\n return opt_keys.copy()\n else:\n raise ValueError('`d` is None, but `required_keys` is not empty.')\n d = d.copy()\n out = {}\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n if d and noleft:\n raise KeyError(\"Leftover keys ['{}'].\".format(\"', '\".join(list(d.\n keys()))))\n return out\n",
"step-4": "from __future__ import absolute_import\nimport numpy as np\n\n\ndef arr2str(arr, sep=', ', fmt='{}'):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [(indent * ' ' + s[i:i + split]) for i in range(0, len(s), split)]\n return '\\n'.join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n\n def dict_handler(d):\n return d.items()\n handlers = {list: enumerate, tuple: enumerate, set: enumerate,\n frozenset: enumerate, dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n return serialize(d)\n\n\ndef fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n \"\"\"\n Populate dictionary with data from a given dict ``d``, and check if ``d``\n has required and optional keys. Set optionals with default if not present.\n\n If input ``d`` is None and ``required_keys`` is empty, just return\n ``opt_keys``.\n\n Parameters\n ----------\n d : dict or None\n Input dictionary containing the data to be checked. If is ``None``, then\n a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a\n ``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is\n not, then a ``ValueError`` israised.\n required_keys : list or None, optional\n Keys that must be present and set in ``d``. (default: None)\n opt_keys : dict or None, optional\n Keys that are optional. ``opt_keys`` provides optional keys and default\n values ``d`` is filled with if not present in ``d``. (default: None)\n noleft : bool, optional\n If True, raises a ``KeyError``, when ``d`` contains etxra keys, other\n than those given in ``required_keys`` and ``opt_keys``. (default: True)\n\n Returns\n -------\n out : dict\n Contains all required and optional keys, using default values, where\n optional keys were missing. 
If ``d`` was None, a copy of ``opt_keys`` is\n returned, if ``opt_keys`` was not ``None``.\n \"\"\"\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError('`d` and òpt_keys` are both None.')\n return opt_keys.copy()\n else:\n raise ValueError('`d` is None, but `required_keys` is not empty.')\n d = d.copy()\n out = {}\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n if d and noleft:\n raise KeyError(\"Leftover keys ['{}'].\".format(\"', '\".join(list(d.\n keys()))))\n return out\n",
"step-5": "# coding: utf8\n\nfrom __future__ import absolute_import\n\nimport numpy as np\n\n\ndef arr2str(arr, sep=\", \", fmt=\"{}\"):\n \"\"\"\n Make a string from a list seperated by ``sep`` and each item formatted\n with ``fmt``.\n \"\"\"\n return sep.join([fmt.format(v) for v in arr])\n\n\ndef indent_wrap(s, indent=0, wrap=80):\n \"\"\"\n Wraps and indents a string ``s``.\n\n Parameters\n ----------\n s : str\n The string to wrap.\n indent : int\n How far to indent each new line.\n wrape : int\n Number of character after which to wrap the string.\n\n Returns\n -------\n s : str\n Indented and wrapped string, each line has length ``wrap``, except the\n last one, which may have less than ``wrap`` characters.\n\n Example\n -------\n >>> s = 2 * \"abcdefghijklmnopqrstuvwxyz\"\n >>> indent_wrap(s, indent=0, wrap=26)\n 'abcdefghijklmnopqrstuvwxyz\\nabcdefghijklmnopqrstuvwxyz'\n >>> indent_wrap(s, indent=2, wrap=26)\n ' abcdefghijklmnopqrstuvwx\\n yzabcdefghijklmnopqrstuv\\n wxyz'\n \"\"\"\n split = wrap - indent\n chunks = [indent * \" \" + s[i:i + split] for i in range(0, len(s), split)]\n return \"\\n\".join(chunks)\n\n\ndef serialize_ndarrays(d):\n \"\"\"\n Recursively traverse through iterable object ``d`` and convert all occuring\n ndarrays to lists to make it JSON serializable.\n\n Note: Works for 1D dicts with ndarrays at first level. 
Certainly not tested\n and meant to work for all use cases.\n Made with code from: http://code.activestate.com/recipes/577504/\n\n Parameters\n ----------\n d : iterable\n Can be dict, list, set, tuple or frozenset.\n\n Returns\n -------\n d : iterable\n Same as input, but all ndarrays replaced by lists.\n \"\"\"\n def dict_handler(d):\n return d.items()\n\n handlers = {list: enumerate, tuple: enumerate,\n set: enumerate, frozenset: enumerate,\n dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n\n return serialize(d)\n\n\ndef fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n \"\"\"\n Populate dictionary with data from a given dict ``d``, and check if ``d``\n has required and optional keys. Set optionals with default if not present.\n\n If input ``d`` is None and ``required_keys`` is empty, just return\n ``opt_keys``.\n\n Parameters\n ----------\n d : dict or None\n Input dictionary containing the data to be checked. If is ``None``, then\n a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a\n ``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is\n not, then a ``ValueError`` israised.\n required_keys : list or None, optional\n Keys that must be present and set in ``d``. (default: None)\n opt_keys : dict or None, optional\n Keys that are optional. ``opt_keys`` provides optional keys and default\n values ``d`` is filled with if not present in ``d``. (default: None)\n noleft : bool, optional\n If True, raises a ``KeyError``, when ``d`` contains etxra keys, other\n than those given in ``required_keys`` and ``opt_keys``. (default: True)\n\n Returns\n -------\n out : dict\n Contains all required and optional keys, using default values, where\n optional keys were missing. 
If ``d`` was None, a copy of ``opt_keys`` is\n returned, if ``opt_keys`` was not ``None``.\n \"\"\"\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError(\"`d` and òpt_keys` are both None.\")\n return opt_keys.copy()\n else:\n raise ValueError(\"`d` is None, but `required_keys` is not empty.\")\n\n d = d.copy()\n out = {}\n # Set required keys\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n # Set optional values, if key not given\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n # Complain when extra keys are left and noleft is True\n if d and noleft:\n raise KeyError(\"Leftover keys ['{}'].\".format(\n \"', '\".join(list(d.keys()))))\n return out\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import json
import csv
import re
import requests
import spacy
import nltk
from nltk.parse import CoreNLPParser
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
from time import time
nlp = spacy.load('es_core_news_sm')
from modules_api import conts_log
sw_spanish="./data/stop-esp.txt"
sw_english="./data/stop-eng.txt"
inner_spanish="./data/inner-stop-esp.txt"
inner_english="./data/inner-stop-eng.txt"
import stanza
### METODO PARA EL SERVICIO
'''
como el main de debajo. este método va a ser el controlador.
Mediante parámetros va a decidir qué procesos va a seguir
termList: array/lista de terminos
lang: string con el idoma : es, en
timeEx: booleano que activa si se aplica timex o no
patternBasedClean: booleano que activa si se aplican patrones o no
pluralClean: booleano que activa si se aplica limpieza de plurales o no
numbersClean: booleano que activa si se aplica limpieza de numeros o no
accentClean: booleano que activa si se aplica limpieza de acentos o no
'''
def preprocessing_terms(termlist, lang_in, timeEx, patternBasedClean, pluralClean, numbersClean, accentClean):
    """
    Controller for the term-preprocessing service.

    Parameters
    ----------
    termlist : list of str
        Candidate terms to clean.
    lang_in : str
        Language code: 'es' or 'en'.
    timeEx : bool
        If True, remove temporal expressions via the Añotador web service.
    patternBasedClean : bool
        If True, remove terms matching undesirable POS patterns (uses stanza).
    pluralClean : bool
        If True (Spanish only), collapse plural forms to singular.
    numbersClean : bool
        If True, remove terms containing number words.
    accentClean : bool
        If True, drop unaccented duplicates of accented terms.

    Returns
    -------
    list of str
        The cleaned term list.
    """
    # NOTE(review): reference date is hard-coded; the original author flagged
    # it should be computed automatically.
    date = '2020-06-03'
    print('terms:', termlist)
    print('lang:', lang_in)
    # Baseline stopword/punctuation cleaning is always applied.
    processedTerms = clean_terms(termlist, lang_in)
    print('This is processedTerms ')
    print(processedTerms)
    if timeEx:
        # Añotador expects a single '|'-separated string without dashes/commas.
        processedTerms = '| '.join(processedTerms).replace('-', '').replace(',', '').replace(';', '')
        processedTerms = annotate_timex(processedTerms, date, lang_in)
        processedTerms.sort()
    if patternBasedClean and lang_in in ('es', 'en'):
        # Both languages follow the same flow; only the stanza model differs
        # (the original duplicated this branch per language).
        stanza.download(lang_in)
        pos_tagger = stanza.Pipeline(lang_in)
        processedTerms = delete_pattern(processedTerms, pos_tagger)
    if pluralClean and lang_in == 'es':
        processedTerms = quit_plural(processedTerms)
    if numbersClean:
        processedTerms = delete_numbers(processedTerms)
    if accentClean:
        processedTerms = acentos(processedTerms)
    # Final pass: remove any stopwords exposed by the steps above.
    processedTerms = clean_terms(processedTerms, lang_in)
    return processedTerms
# 0 clean punctuation and stopwords
def clean_terms(termlist, lang_in):
    """
    Remove stopwords and stray punctuation from a term list.

    Combines the NLTK stopword list for ``lang_in`` with a project-specific
    stopword file, then drops every term found in either list.

    Parameters
    ----------
    termlist : list of str
        Terms to filter.
    lang_in : str
        'es' or 'en'; any other value fails because no stopword list is
        selected (same behaviour as the original).

    Returns
    -------
    list of str
        Surviving terms with ',' and '-' stripped out.
    """
    start_time = time()
    if lang_in == 'es':
        stop = stopwords.words('spanish')
        stop_path = sw_spanish
    elif lang_in == 'en':
        stop = stopwords.words('english')
        stop_path = sw_english
    # Close the custom stopword file deterministically (the original leaked
    # the file handle).
    with open(stop_path, 'r', encoding='utf-8') as file:
        for line in file:
            stop.append(line.strip())
    clean_list = []
    deletes = []
    for item in termlist:
        k = item.strip(',.:')
        if k.lower() in stop or k in stop:
            deletes.append(k)
        else:
            # The original's `elif` condition was always true here; a plain
            # else is equivalent.
            clean_list.append(k.replace(',', '').replace('-', ''))
    print(deletes)
    cont = len(termlist) - len(clean_list)
    elapsed_time = time() - start_time
    txt = 'CLEAN_TERMS, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + str(len(clean_list)) + ') TIME: (' + str(elapsed_time) + ')'
    joind = ', '.join(deletes)
    conts_log.information(txt, 'TERMS REMOVED: ' + joind)
    print('CLEAN_TERMS, DELETE', cont, len(clean_list), elapsed_time)
    return clean_list
# 1 añotador
def annotate_timex(text, date, lang):
    """
    Call the Añotador web service and drop temporal expressions.

    Parameters
    ----------
    text : str
        '|'-separated term string.
    date : str
        Reference date (currently not forwarded in the payload — the service
        is called with an empty ``inputDate``, as in the original).
    lang : str
        Language code forwarded to the service.

    Returns
    -------
    list of str
        Terms with TIMEX3-annotated items removed.  If the service is down,
        the input is returned split on '| ' and an error is logged.
    """
    start_time = time()
    # Keep a copy of the outgoing text for debugging; close the handle
    # deterministically (the original leaked it).
    with open('texto.txt', 'w') as f:
        f.write(text)
    url = 'https://annotador.oeg.fi.upm.es/annotate'
    # Build the payload with json.dumps so quotes/backslashes inside `text`
    # are escaped correctly (the original concatenated raw strings, which
    # produced invalid JSON for such inputs).
    params = json.dumps({'inputText': text, 'inputDate': '', 'domain': 'legal', 'lan': lang, 'format': 'timex3'})
    headers = {
        'Content-Type': 'application/json;charset=utf-8'
    }
    response = requests.request('POST', url, headers=headers, data=params.encode('utf8'))
    textanotador = response.text
    print('ENTRA ANOTADOR')
    print(textanotador)
    code = response.status_code
    list_anotador = textanotador.split('|')
    print(list_anotador)
    # Items still containing '<' carry TIMEX3 markup.  Filter them in one
    # pass: the original popped while iterating, skipping elements, and ran
    # the loop twice to compensate (still missing some cases).
    deletes = [i for i in list_anotador if '<' in i and len(i) > 2]
    kept = [i for i in list_anotador if not ('<' in i and len(i) > 2)]
    cont = len(deletes)
    anotador = [i.strip().replace(',', '') for i in kept]
    if code != 200:
        print('WARNING: Annotador is down. Temporal expressions could not be removed.')
        anotador = text.split('| ')
        conts_log.error('Annotador is down. Temporal expressions could not be removed.', code)
    else:
        elapsed_time = time() - start_time
        txt = 'AÑOTADOR, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + str(len(anotador)) + ') TIME: (' + str(elapsed_time) + ')'
        joind = ', '.join(deletes)
        print('AÑOTADOR DELETE', cont, len(anotador), elapsed_time)
        conts_log.information(txt, 'TERMS REMOVED: ' + joind)
    return anotador
def infinitive(verb):
    """
    Crude heuristic mapping a Spanish future-tense verb form back toward
    its infinitive by trimming common future endings ('rá', 'án', 'ré').
    Forms already ending in 'ar'/'er'/'ir' are returned untouched.
    """
    if verb[-2:] in ('ar', 'er', 'ir'):
        return verb
    # Each check looks at the current (possibly already trimmed) form,
    # mirroring the original sequential cascade.
    if verb.endswith('rá'):
        verb = verb[:-1]
    if verb.endswith('án'):
        verb = verb[:-2]
    if verb.endswith('ré'):
        verb = verb[:-1]
    return verb
# 2.1 patrones es
def delete_pattern(anotador, pos_tagger):
    """
    Remove terms whose coarse POS sequence matches an undesirable pattern.

    Each term in ``anotador`` is tagged with ``pos_tagger`` (a stanza
    pipeline).  Single-word AUX/VERB terms are replaced in place by their
    spaCy lemma.  One-word adverbs, and two-/three-word terms whose POS
    sequence appears in the pattern tables below, are removed.

    The original spelled every pattern out as a separate copy-pasted
    ``if`` block (~250 lines) and listed two patterns twice, double
    counting them in the log; this table-driven version is equivalent,
    with the double counting fixed.

    Parameters
    ----------
    anotador : list of str
        Term list; mutated in place and also returned.
    pos_tagger : stanza.Pipeline
        POS tagger for the terms' language.

    Returns
    -------
    list of str
        ``anotador`` with pattern-matching terms removed.
    """
    # 4-char prefixes of the tags collected below:
    # 'aux-', 'noun', 'verb', 'adv-', 'adj-', 'scon'.
    patterns_2 = {
        ('aux-', 'verb'), ('verb', 'aux-'), ('verb', 'verb'),
        ('noun', 'verb'), ('noun', 'aux-'), ('adv-', 'adj-'),
        ('adj-', 'adv-'), ('adv-', 'aux-'), ('aux-', 'adv-'),
        ('adv-', 'verb'), ('noun', 'adv-'), ('adv-', 'noun'),
        ('verb', 'adv-'), ('verb', 'noun'), ('aux-', 'noun'),
        ('adj-', 'noun'),
    }
    patterns_3 = {
        ('noun', 'verb', 'verb'), ('noun', 'aux-', 'verb'),
        ('noun', 'aux-', 'aux-'), ('noun', 'verb', 'aux-'),
        ('noun', 'verb', 'noun'), ('noun', 'aux-', 'noun'),
        ('verb', 'noun', 'noun'), ('noun', 'noun', 'verb'),
        ('aux-', 'noun', 'noun'), ('noun', 'noun', 'aux-'),
        ('aux-', 'verb', 'noun'), ('noun', 'verb', 'adj-'),
        ('verb', 'noun', 'adj-'), ('noun', 'aux-', 'adj-'),
        ('noun', 'adv-', 'adj-'), ('adj-', 'adv-', 'adj-'),
        ('noun', 'adv-', 'scon'), ('adj-', 'scon', 'adv-'),
        ('aux-', 'noun', 'adj-'), ('verb', 'verb', 'verb'),
        ('adj-', 'noun', 'adj-'),
    }
    start_time = time()
    deletes = []
    cont = 0
    for term in anotador:
        if len(term) <= 1:
            continue
        sent = pos_tagger(term).sentences[0]
        tags = [(word.text, word.upos) for word in sent.words]
        words = term.split(' ')
        list_pos = []
        for text_, upos in tags:
            if upos in ('AUX', 'VERB'):
                # Lemmatise with spaCy; fall back to the surface form when
                # the lemma equals the whole term (original behaviour).
                lem = ''.join(tok.lemma_ for tok in nlp(text_))
                if lem == term:
                    lem = text_
                prefix = 'aux--' if upos == 'AUX' else 'verb-'
                list_pos.append(prefix + str(lem))
                if len(words) == 1:
                    # Replace single-word verb/aux terms by their lemma.
                    anotador[anotador.index(str(term))] = str(lem)
            elif upos == 'NOUN':
                list_pos.append('noun-' + str(text_))
            elif upos == 'ADV':
                list_pos.append('adv--' + str(text_))
            elif upos == 'ADJ':
                list_pos.append('adj--' + str(text_))
            elif upos == 'SCONJ':
                list_pos.append('sconj' + str(text_))
        prefixes = tuple(p[0:4] for p in list_pos)
        if (prefixes == ('adv-',)
                or (len(prefixes) == 2 and len(words) == 2 and prefixes in patterns_2)
                or (len(prefixes) == 3 and len(words) == 3 and prefixes in patterns_3)):
            deletes.append(term)
            cont = cont + 1
    # Remove matched terms; lemma-replaced entries are no longer present,
    # so guard with a membership test (as the original did).
    for term in deletes:
        if term in anotador:
            anotador.pop(anotador.index(term))
    elapsed_time = time() - start_time
    txt = 'PATRONES, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(len(anotador)) + ') TIME: (' + str(elapsed_time) + ')'
    joind = ', '.join(deletes)
    print('PATRONES DELETE', cont, len(anotador), elapsed_time)
    conts_log.information(txt, 'TERMS REMOVED: ' + joind)
    return anotador
# 3 plurales
def quit_plural(valuelist):
    """
    Heuristically singularise Spanish plural terms.

    Words ending in '-es'/'-s' are stripped of the plural ending, with
    stem repairs ('-on' -> '-ón'; endings in 'v'/'bl'/'br' regain an 'e')
    and a first-word agreement fix.  Terms containing a number word (one
    per line in './data/numberlist_es') are restored to their original
    form untouched.

    Parameters
    ----------
    valuelist : list of str
        Term list; mutated in place during processing.

    Returns
    -------
    list of str
        Deduplicated singularised terms, first-seen order preserved.
    """
    start_time = time()
    # Close the number-word file deterministically (the original leaked it).
    with open('./data/numberlist_es', 'r', encoding='utf-8') as file:
        read = file.readlines()
    cont = 0
    for i in valuelist:
        ind = valuelist.index(i)
        term = i.replace(',', '').replace('-', ' ')
        valuelist[ind] = term
        plu = ''
        if 'es' in term[-2:] or 's' in term[-1:]:
            slp = term.split(' ')
            # A number word anywhere in the term keeps the original intact.
            for n in read:
                if n[:-1] in slp:
                    plu = i
            if not len(plu):
                for j in slp:
                    if ('es' in j[-2:]) and 't' not in j[-3:-2] and 'l' not in j[-3:-2] or ('les' in j[-3:]):
                        plu += ' ' + j[:-2]
                        # Repair stems broken by dropping '-es'.
                        if 'on' in plu[-2:]:
                            plu = ' ' + plu[:-2] + 'ón'
                        if 'v' in plu[-1:]:
                            plu = ' ' + plu + 'e'
                        if 'bl' in plu[-2:]:
                            plu = ' ' + plu + 'e'
                        if 'br' in plu[-2:]:
                            plu = ' ' + plu + 'e'
                    elif 's' in j[-1:]:
                        plu += ' ' + j[:-1]
                        pos = slp.index(j)
                        if pos > 0:
                            bef = slp[0]
                            # Agreement fix: drop the trailing 'n' of the
                            # first word when it is not an '-ón' noun.
                            if 'n' in bef[-1:] and 'ón' not in bef[-2:]:
                                splb = plu.split(' ')
                                firts = splb[1]
                                if 'n' not in firts[-1:]:
                                    pass
                                else:
                                    plu0 = firts[:-1]
                                    join1 = ' '.join(splb[2:])
                                    plu = plu0 + ' ' + join1
                    else:
                        plu += ' ' + j
            ind = valuelist.index(term)
            valuelist[ind] = plu.strip()
            cont = cont + 1
    # Deduplicate preserving first-seen order.  The original built the
    # result from a set(), which made the output order non-deterministic
    # across runs (hash randomisation).
    quit_plu = []
    deletes = []
    for i in valuelist:
        if i not in quit_plu:
            quit_plu.append(i)
        else:
            deletes.append(i)
    elapsed_time = time() - start_time
    txt = 'PLURAL, DELETE' + ' (' + str(len(valuelist) - len(quit_plu)) + ') NEW LIST SIZE: (' + str(len(quit_plu)) + ') TIME: (' + str(elapsed_time) + ')'
    joind = ', '.join(deletes)
    print('PLURALES DELETE', len(valuelist) - len(quit_plu), len(quit_plu), elapsed_time)
    conts_log.information(txt, 'TERMS REMOVED: ' + joind)
    return quit_plu
# 4 numeros
def delete_numbers(list_):
    """
    Remove terms containing a number word.

    Number words are read one per line from './data/numberlist_es'; any
    term containing one of them as a space-delimited word is removed.

    Parameters
    ----------
    list_ : list of str
        Term list; mutated in place and also returned.

    Returns
    -------
    list of str
        ``list_`` with number-bearing terms removed.
    """
    start_time = time()
    # Close the number-word file deterministically (the original leaked it).
    with open('./data/numberlist_es', 'r', encoding='utf-8') as file:
        read = file.readlines()
    cont = 0
    deletes = []
    for i in read:
        if i[-1:] == '\n':
            i = i[:-1]
        # Iterate over a snapshot: the original popped from the list while
        # looping over it, which silently skipped the following element.
        for j in list(list_):
            if ' ' + i + ' ' in ' ' + j + ' ':
                deletes.append(j)
                cont = cont + 1
                list_.remove(j)
    elapsed_time = time() - start_time
    txt = 'NUMBERS, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(len(list_)) + ') TIME: (' + str(elapsed_time) + ')'
    joind = ', '.join(deletes)
    print('NUMEROS DELETE', cont, len(list_), elapsed_time)
    conts_log.information(txt, 'TERMS REMOVED: ' + joind)
    return list_
# 5 leer archivo
def readFile(read):
    """
    Build a '|'-separated term string from tab-separated input lines.

    Parameters
    ----------
    read : list of str
        Lines of the form ``<id>TAB<term>``, optionally newline-terminated.

    Returns
    -------
    str
        ``'| term1| term2...'`` — the second tab field of every line,
        each prefixed with '| '.
    """
    text = ''
    for line in read:
        # Drop a trailing newline before splitting on tabs.
        if line[-1:] == '\n':
            fields = line[:-1].split('\t')
        else:
            fields = line.split('\t')
        # The original also computed a cleaned copy of the term and its
        # word split, but never used either — dead code removed.
        text += '| ' + fields[1]
    return text
#elimina tildes
def quit_tilds(s):
    """Return ``s`` with lowercase accented vowels (á é í ó ú) de-accented."""
    # Single-pass character mapping, equivalent to the chained
    # str.replace calls; uppercase accented vowels are left unchanged.
    return s.translate(str.maketrans('áéíóú', 'aeiou'))
def acentos(last):
    """
    Drop terms that collide with another term once accents are removed.

    Builds an accent-stripped view of the list; stripped forms seen more
    than once mark duplicates, and the first entry of ``last`` equal to
    such a form is removed.  The list is mutated in place, sorted, and
    returned.
    """
    # Accent-stripped view (terms without accented vowels pass through).
    stripped = []
    for term in last:
        if re.search("[áéíóúÁÉÍÓÚ]+", term) is not None:
            stripped.append(quit_tilds(term))
        else:
            stripped.append(term)
    # Stripped forms occurring more than once.
    seen = []
    duplicated = []
    for form in stripped:
        if form in seen:
            duplicated.append(form)
        else:
            seen.append(form)
    # Remove (once) each original term equal to a duplicated stripped form.
    to_remove = []
    for term in last:
        if term in duplicated and term not in to_remove:
            to_remove.append(term)
    for term in to_remove:
        last.pop(last.index(term))
    last.sort()
    return last
#-------MAIN-------#
def main(read, lang_in):
    """
    Run the full cleaning pipeline over raw tab-separated input lines.

    Parameters
    ----------
    read : list of str
        Raw input lines (``<id>TAB<term>``).
    lang_in : str
        Language code; 'es' enables the Spanish-specific steps.

    Returns
    -------
    list of str
        The fully cleaned term list.
    """
    text = readFile(read)
    date = '2020-06-03'  # NOTE(review): reference date is hard-coded.
    termlist = text.split('| ')
    print('RECIBE', termlist)
    clean_text = clean_terms(termlist, lang_in)
    join_clean_text = '| '.join(clean_text).replace('-', '').replace(',', '').replace(';', '')
    anotador = annotate_timex(join_clean_text, date, lang_in)
    anotador.sort()
    if lang_in == 'es':
        # delete_pattern requires a stanza pipeline; the original called
        # it with a missing argument, raising a TypeError.
        stanza.download('es')
        pos_tagger = stanza.Pipeline('es')
        anotador = delete_pattern(anotador, pos_tagger)
        anotador = quit_plural(anotador)
        anotador = delete_numbers(anotador)
        anotador = acentos(anotador)
    # Final stopword pass.  The original referenced a variable that was
    # undefined for non-Spanish input (NameError); now every language gets
    # the final clean and the result is returned.
    stop2 = clean_terms(anotador, lang_in)
    print('FINALES', stop2)
    return stop2
#file=open('../data/estatuto_es.txt', 'r', encoding='utf-8')
#read=file.readlines()
#main(read)
|
normal
|
{
"blob_id": "afb0359f4cdf5ed32bb785d969e9bf8919bb6add",
"index": 3408,
"step-1": "<mask token>\n\n\ndef preprocessing_terms(termlist, lang_in, timeEx, patternBasedClean,\n pluralClean, numbersClean, accentClean):\n date = '2020-06-03'\n print('terms:', termlist)\n print('lang:', lang_in)\n processedTerms = clean_terms(termlist, lang_in)\n print('This is processedTerms ')\n print(processedTerms)\n if timeEx == True:\n processedTerms = '| '.join(processedTerms).replace('-', '').replace(','\n , '').replace(';', '')\n processedTerms = annotate_timex(processedTerms, date, lang_in)\n processedTerms.sort()\n if lang_in == 'es' and patternBasedClean == True:\n stanza.download('es')\n pos_tagger = stanza.Pipeline('es')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'en' and patternBasedClean == True:\n stanza.download('en')\n pos_tagger = stanza.Pipeline('en')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'es' and pluralClean == True:\n processedTerms = quit_plural(processedTerms)\n if numbersClean == True:\n processedTerms = delete_numbers(processedTerms)\n if accentClean == True:\n processedTerms = acentos(processedTerms)\n processedTerms = clean_terms(processedTerms, lang_in)\n return processedTerms\n\n\ndef clean_terms(termlist, lang_in):\n start_time = time()\n if lang_in == 'es':\n stop = stopwords.words('spanish')\n file = open(sw_spanish, 'r', encoding='utf-8')\n mystop = file.readlines()\n elif lang_in == 'en':\n stop = stopwords.words('english')\n file = open(sw_english, 'r', encoding='utf-8')\n mystop = file.readlines()\n clean_list = []\n cont = 0\n for i in mystop:\n stop.append(i.strip())\n deletes = []\n for i in termlist:\n k = i.strip(',.:')\n if k.lower() in stop or k in stop:\n deletes.append(k)\n elif k.lower() not in stop or k not in stop:\n clean_list.append(k.replace(',', '').replace('-', ''))\n print(deletes)\n cont = len(termlist) - len(clean_list)\n elapsed_time = time() - start_time\n txt = 'CLEAN_TERMS, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + 
str(len\n (clean_list)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n print('CLEAN_TERMS, DELETE', cont, len(clean_list), elapsed_time)\n return clean_list\n\n\ndef annotate_timex(text, date, lang):\n f = open('texto.txt', 'w')\n f.write(text)\n textanotador2 = ''\n start_time = time()\n url = 'https://annotador.oeg.fi.upm.es/annotate'\n params = ('{\"inputText\":\"' + text +\n '\",\"inputDate\":\"\",\"domain\":\"legal\",\"lan\":\"' + lang +\n '\",\"format\":\"timex3\"}')\n headers = {'Content-Type': 'application/json;charset=utf-8'}\n response = requests.request('POST', url, headers=headers, data=params.\n encode('utf8'))\n textanotador = response.text\n print('ENTRA ANOTADOR')\n print(textanotador)\n code = response.status_code\n list_anotador = textanotador.split('|')\n print(list_anotador)\n deletes = []\n cont = 0\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n print(i)\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n anotador = []\n for i in list_anotador:\n anotador.append(i.strip().replace(',', ''))\n if code != 200:\n print(\n 'WARNING: Annotador is down. Temporal expressions could not be removed.'\n )\n anotador = text.split('| ')\n conts_log.error(\n 'Annotador is down. 
Temporal expressions could not be removed.',\n code)\n else:\n elapsed_time = time() - start_time\n txt = 'AÑOTADOR, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + str(len\n (anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('AÑOTADOR DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef infinitive(verb):\n if verb[-2:] == 'ar' or verb[-2:] == 'er' or verb[-2:] == 'ir':\n verb = verb\n else:\n if verb[-2:] == 'rá':\n verb = verb[:-1]\n if verb[-2:] == 'án':\n verb = verb[:-2]\n if verb[-2:] == 'ré':\n verb = verb[:-1]\n return verb\n\n\ndef delete_pattern(anotador, pos_tagger):\n total = 0\n deletes = []\n start_time = time()\n lemmas_list = []\n cont = 0\n cont_inf = 0\n cont_post = 0\n for i in anotador:\n print('this is i')\n print(i)\n if len(i) > 1:\n doc = pos_tagger(i)\n sent = doc.sentences[0]\n word = sent.words\n tag = []\n for token in word:\n pos = token.upos\n term = token.text\n tupla = term, pos\n tag.append(tupla)\n print(token.text)\n print(pos)\n print('this is tag ')\n print(tag)\n total = total + 1\n joini = i\n list_pos = []\n spl = joini.split(' ')\n if joini != '':\n join_tag = ''\n for t in tag:\n print('this is t')\n print(t)\n if t[1] == 'AUX':\n doc = nlp(t[0])\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('aux--' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if t[1] == 'NOUN':\n list_pos.append('noun-' + str(t[0]))\n if t[1] == 'VERB':\n cont_inf = cont_inf + 1\n doc = nlp(t[0])\n for tok in doc:\n l = tok.lemma_\n if l != t[0]:\n cont_post = cont_post + 1\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('verb-' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if 
t[1] == 'ADV':\n list_pos.append('adv--' + str(t[0]))\n if t[1] == 'ADJ':\n list_pos.append('adj--' + str(t[0]))\n if t[1] == 'SCONJ':\n list_pos.append('sconj' + str(t[0]))\n spl_i = joini.split(' ')\n if len(list_pos) == 1:\n pos1 = list_pos[0]\n if pos1[0:4] == 'adv-':\n term = pos1[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 2 and len(spl_i) == 2:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n term = ''\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = 
anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 3 and len(spl_i) == 3:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n pos3 = list_pos[2]\n term = ''\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n 
deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in 
anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'scon' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'scon' and pos3[0:4\n ] == 'adv-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n for i in deletes:\n if i in anotador:\n ind = anotador.index(i)\n 
anotador.pop(ind)\n elapsed_time = time() - start_time\n txt = 'PATRONES, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(\n len(anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PATRONES DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef quit_plural(valuelist):\n start_time = time()\n file = open('./data/numberlist_es', 'r', encoding='utf-8')\n read = file.readlines()\n plural = []\n cont = 0\n for i in valuelist:\n ind = valuelist.index(i)\n term = i.replace(',', '').replace('-', ' ')\n valuelist[ind] = term\n plu = ''\n if 'es' in term[-2:] or 's' in term[-1:]:\n slp = term.split(' ')\n for n in read:\n if n[:-1] in slp:\n plu = i\n if not len(plu):\n for j in slp:\n if 'es' in j[-2:] and 't' not in j[-3:-2] and 'l' not in j[\n -3:-2] or 'les' in j[-3:]:\n plu += ' ' + j[:-2]\n if 'on' in plu[-2:]:\n plu = ' ' + plu[:-2] + 'ón'\n if 'v' in plu[-1:]:\n plu = ' ' + plu + 'e'\n if 'bl' in plu[-2:]:\n plu = ' ' + plu + 'e'\n if 'br' in plu[-2:]:\n plu = ' ' + plu + 'e'\n elif 's' in j[-1:]:\n plu += ' ' + j[:-1]\n pos = slp.index(j)\n if pos > 0:\n bef = slp[0]\n if 'n' in bef[-1:] and 'ón' not in bef[-2:]:\n splb = plu.split(' ')\n firts = splb[1]\n if 'n' not in firts[-1:]:\n pass\n else:\n plu0 = firts[:-1]\n join1 = ' '.join(splb[2:])\n plu = plu0 + ' ' + join1\n else:\n plu += ' ' + j\n ind = valuelist.index(term)\n valuelist[ind] = plu.strip()\n cont = cont + 1\n quit_plu = []\n nuevalista = set(valuelist)\n for i in nuevalista:\n quit_plu.append(i)\n deletes = []\n new = []\n for i in valuelist:\n if i not in new:\n new.append(i)\n else:\n deletes.append(i)\n elapsed_time = time() - start_time\n txt = 'PLURAL, DELETE' + ' (' + str(len(valuelist) - len(quit_plu)\n ) + ') NEW LIST SIZE: (' + str(len(quit_plu)) + ') TIME: (' + str(\n elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PLURALES DELETE', len(valuelist) - len(quit_plu), 
len(quit_plu),\n elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return quit_plu\n\n\n<mask token>\n\n\ndef quit_tilds(s):\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú', 'u')\n for a, b in replacements:\n s = s.replace(a, b)\n return s\n\n\n<mask token>\n\n\ndef main(read, lang_in):\n start_time = time()\n text = readFile(read)\n date = '2020-06-03'\n lang = lang_in\n termlist = text.split('| ')\n print('RECIBE', termlist)\n clean_text = clean_terms(termlist, lang_in)\n join_clean_text = '| '.join(clean_text).replace('-', '').replace(',', ''\n ).replace(';', '')\n anotador = annotate_timex(join_clean_text, date, lang)\n anotador.sort()\n if lang_in == 'es':\n pattern = delete_pattern(anotador)\n plural = quit_plural(pattern)\n numbers = delete_numbers(plural)\n tildes = acentos(numbers)\n stop2 = clean_terms(tildes, lang_in)\n print('FINALES', stop2)\n \"\"\"new=open('../data/clean_terms_freq4.txt', 'w')#se imprime lo que se queda\n\n\tfor i in stop2:\n\t new.write(i+'\n')\n\tnew.close()\n\telapsed_time=time()-start_time\n\tprint('Main', elapsed_time)\n\treturn(stop2)\"\"\"\n",
"step-2": "<mask token>\n\n\ndef preprocessing_terms(termlist, lang_in, timeEx, patternBasedClean,\n pluralClean, numbersClean, accentClean):\n date = '2020-06-03'\n print('terms:', termlist)\n print('lang:', lang_in)\n processedTerms = clean_terms(termlist, lang_in)\n print('This is processedTerms ')\n print(processedTerms)\n if timeEx == True:\n processedTerms = '| '.join(processedTerms).replace('-', '').replace(','\n , '').replace(';', '')\n processedTerms = annotate_timex(processedTerms, date, lang_in)\n processedTerms.sort()\n if lang_in == 'es' and patternBasedClean == True:\n stanza.download('es')\n pos_tagger = stanza.Pipeline('es')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'en' and patternBasedClean == True:\n stanza.download('en')\n pos_tagger = stanza.Pipeline('en')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'es' and pluralClean == True:\n processedTerms = quit_plural(processedTerms)\n if numbersClean == True:\n processedTerms = delete_numbers(processedTerms)\n if accentClean == True:\n processedTerms = acentos(processedTerms)\n processedTerms = clean_terms(processedTerms, lang_in)\n return processedTerms\n\n\ndef clean_terms(termlist, lang_in):\n start_time = time()\n if lang_in == 'es':\n stop = stopwords.words('spanish')\n file = open(sw_spanish, 'r', encoding='utf-8')\n mystop = file.readlines()\n elif lang_in == 'en':\n stop = stopwords.words('english')\n file = open(sw_english, 'r', encoding='utf-8')\n mystop = file.readlines()\n clean_list = []\n cont = 0\n for i in mystop:\n stop.append(i.strip())\n deletes = []\n for i in termlist:\n k = i.strip(',.:')\n if k.lower() in stop or k in stop:\n deletes.append(k)\n elif k.lower() not in stop or k not in stop:\n clean_list.append(k.replace(',', '').replace('-', ''))\n print(deletes)\n cont = len(termlist) - len(clean_list)\n elapsed_time = time() - start_time\n txt = 'CLEAN_TERMS, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + 
str(len\n (clean_list)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n print('CLEAN_TERMS, DELETE', cont, len(clean_list), elapsed_time)\n return clean_list\n\n\ndef annotate_timex(text, date, lang):\n f = open('texto.txt', 'w')\n f.write(text)\n textanotador2 = ''\n start_time = time()\n url = 'https://annotador.oeg.fi.upm.es/annotate'\n params = ('{\"inputText\":\"' + text +\n '\",\"inputDate\":\"\",\"domain\":\"legal\",\"lan\":\"' + lang +\n '\",\"format\":\"timex3\"}')\n headers = {'Content-Type': 'application/json;charset=utf-8'}\n response = requests.request('POST', url, headers=headers, data=params.\n encode('utf8'))\n textanotador = response.text\n print('ENTRA ANOTADOR')\n print(textanotador)\n code = response.status_code\n list_anotador = textanotador.split('|')\n print(list_anotador)\n deletes = []\n cont = 0\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n print(i)\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n anotador = []\n for i in list_anotador:\n anotador.append(i.strip().replace(',', ''))\n if code != 200:\n print(\n 'WARNING: Annotador is down. Temporal expressions could not be removed.'\n )\n anotador = text.split('| ')\n conts_log.error(\n 'Annotador is down. 
Temporal expressions could not be removed.',\n code)\n else:\n elapsed_time = time() - start_time\n txt = 'AÑOTADOR, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + str(len\n (anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('AÑOTADOR DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef infinitive(verb):\n if verb[-2:] == 'ar' or verb[-2:] == 'er' or verb[-2:] == 'ir':\n verb = verb\n else:\n if verb[-2:] == 'rá':\n verb = verb[:-1]\n if verb[-2:] == 'án':\n verb = verb[:-2]\n if verb[-2:] == 'ré':\n verb = verb[:-1]\n return verb\n\n\ndef delete_pattern(anotador, pos_tagger):\n total = 0\n deletes = []\n start_time = time()\n lemmas_list = []\n cont = 0\n cont_inf = 0\n cont_post = 0\n for i in anotador:\n print('this is i')\n print(i)\n if len(i) > 1:\n doc = pos_tagger(i)\n sent = doc.sentences[0]\n word = sent.words\n tag = []\n for token in word:\n pos = token.upos\n term = token.text\n tupla = term, pos\n tag.append(tupla)\n print(token.text)\n print(pos)\n print('this is tag ')\n print(tag)\n total = total + 1\n joini = i\n list_pos = []\n spl = joini.split(' ')\n if joini != '':\n join_tag = ''\n for t in tag:\n print('this is t')\n print(t)\n if t[1] == 'AUX':\n doc = nlp(t[0])\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('aux--' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if t[1] == 'NOUN':\n list_pos.append('noun-' + str(t[0]))\n if t[1] == 'VERB':\n cont_inf = cont_inf + 1\n doc = nlp(t[0])\n for tok in doc:\n l = tok.lemma_\n if l != t[0]:\n cont_post = cont_post + 1\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('verb-' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if 
t[1] == 'ADV':\n list_pos.append('adv--' + str(t[0]))\n if t[1] == 'ADJ':\n list_pos.append('adj--' + str(t[0]))\n if t[1] == 'SCONJ':\n list_pos.append('sconj' + str(t[0]))\n spl_i = joini.split(' ')\n if len(list_pos) == 1:\n pos1 = list_pos[0]\n if pos1[0:4] == 'adv-':\n term = pos1[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 2 and len(spl_i) == 2:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n term = ''\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = 
anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 3 and len(spl_i) == 3:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n pos3 = list_pos[2]\n term = ''\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n 
deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in 
anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'scon' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'scon' and pos3[0:4\n ] == 'adv-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n for i in deletes:\n if i in anotador:\n ind = anotador.index(i)\n 
anotador.pop(ind)\n elapsed_time = time() - start_time\n txt = 'PATRONES, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(\n len(anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PATRONES DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef quit_plural(valuelist):\n start_time = time()\n file = open('./data/numberlist_es', 'r', encoding='utf-8')\n read = file.readlines()\n plural = []\n cont = 0\n for i in valuelist:\n ind = valuelist.index(i)\n term = i.replace(',', '').replace('-', ' ')\n valuelist[ind] = term\n plu = ''\n if 'es' in term[-2:] or 's' in term[-1:]:\n slp = term.split(' ')\n for n in read:\n if n[:-1] in slp:\n plu = i\n if not len(plu):\n for j in slp:\n if 'es' in j[-2:] and 't' not in j[-3:-2] and 'l' not in j[\n -3:-2] or 'les' in j[-3:]:\n plu += ' ' + j[:-2]\n if 'on' in plu[-2:]:\n plu = ' ' + plu[:-2] + 'ón'\n if 'v' in plu[-1:]:\n plu = ' ' + plu + 'e'\n if 'bl' in plu[-2:]:\n plu = ' ' + plu + 'e'\n if 'br' in plu[-2:]:\n plu = ' ' + plu + 'e'\n elif 's' in j[-1:]:\n plu += ' ' + j[:-1]\n pos = slp.index(j)\n if pos > 0:\n bef = slp[0]\n if 'n' in bef[-1:] and 'ón' not in bef[-2:]:\n splb = plu.split(' ')\n firts = splb[1]\n if 'n' not in firts[-1:]:\n pass\n else:\n plu0 = firts[:-1]\n join1 = ' '.join(splb[2:])\n plu = plu0 + ' ' + join1\n else:\n plu += ' ' + j\n ind = valuelist.index(term)\n valuelist[ind] = plu.strip()\n cont = cont + 1\n quit_plu = []\n nuevalista = set(valuelist)\n for i in nuevalista:\n quit_plu.append(i)\n deletes = []\n new = []\n for i in valuelist:\n if i not in new:\n new.append(i)\n else:\n deletes.append(i)\n elapsed_time = time() - start_time\n txt = 'PLURAL, DELETE' + ' (' + str(len(valuelist) - len(quit_plu)\n ) + ') NEW LIST SIZE: (' + str(len(quit_plu)) + ') TIME: (' + str(\n elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PLURALES DELETE', len(valuelist) - len(quit_plu), 
len(quit_plu),\n elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return quit_plu\n\n\n<mask token>\n\n\ndef readFile(read):\n start_time = time()\n text = ''\n for i in read:\n if i[-1:] == '\\n':\n spl = i[:-1].split('\\t')\n else:\n spl = i.split('\\t')\n term = spl[1].replace('-', '').replace(',', '').replace(';', '')\n spl2 = term.split(' ')\n text += '| ' + spl[1]\n elapsed_time = time() - start_time\n return text\n\n\ndef quit_tilds(s):\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú', 'u')\n for a, b in replacements:\n s = s.replace(a, b)\n return s\n\n\n<mask token>\n\n\ndef main(read, lang_in):\n start_time = time()\n text = readFile(read)\n date = '2020-06-03'\n lang = lang_in\n termlist = text.split('| ')\n print('RECIBE', termlist)\n clean_text = clean_terms(termlist, lang_in)\n join_clean_text = '| '.join(clean_text).replace('-', '').replace(',', ''\n ).replace(';', '')\n anotador = annotate_timex(join_clean_text, date, lang)\n anotador.sort()\n if lang_in == 'es':\n pattern = delete_pattern(anotador)\n plural = quit_plural(pattern)\n numbers = delete_numbers(plural)\n tildes = acentos(numbers)\n stop2 = clean_terms(tildes, lang_in)\n print('FINALES', stop2)\n \"\"\"new=open('../data/clean_terms_freq4.txt', 'w')#se imprime lo que se queda\n\n\tfor i in stop2:\n\t new.write(i+'\n')\n\tnew.close()\n\telapsed_time=time()-start_time\n\tprint('Main', elapsed_time)\n\treturn(stop2)\"\"\"\n",
"step-3": "<mask token>\n\n\ndef preprocessing_terms(termlist, lang_in, timeEx, patternBasedClean,\n pluralClean, numbersClean, accentClean):\n date = '2020-06-03'\n print('terms:', termlist)\n print('lang:', lang_in)\n processedTerms = clean_terms(termlist, lang_in)\n print('This is processedTerms ')\n print(processedTerms)\n if timeEx == True:\n processedTerms = '| '.join(processedTerms).replace('-', '').replace(','\n , '').replace(';', '')\n processedTerms = annotate_timex(processedTerms, date, lang_in)\n processedTerms.sort()\n if lang_in == 'es' and patternBasedClean == True:\n stanza.download('es')\n pos_tagger = stanza.Pipeline('es')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'en' and patternBasedClean == True:\n stanza.download('en')\n pos_tagger = stanza.Pipeline('en')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'es' and pluralClean == True:\n processedTerms = quit_plural(processedTerms)\n if numbersClean == True:\n processedTerms = delete_numbers(processedTerms)\n if accentClean == True:\n processedTerms = acentos(processedTerms)\n processedTerms = clean_terms(processedTerms, lang_in)\n return processedTerms\n\n\ndef clean_terms(termlist, lang_in):\n start_time = time()\n if lang_in == 'es':\n stop = stopwords.words('spanish')\n file = open(sw_spanish, 'r', encoding='utf-8')\n mystop = file.readlines()\n elif lang_in == 'en':\n stop = stopwords.words('english')\n file = open(sw_english, 'r', encoding='utf-8')\n mystop = file.readlines()\n clean_list = []\n cont = 0\n for i in mystop:\n stop.append(i.strip())\n deletes = []\n for i in termlist:\n k = i.strip(',.:')\n if k.lower() in stop or k in stop:\n deletes.append(k)\n elif k.lower() not in stop or k not in stop:\n clean_list.append(k.replace(',', '').replace('-', ''))\n print(deletes)\n cont = len(termlist) - len(clean_list)\n elapsed_time = time() - start_time\n txt = 'CLEAN_TERMS, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + 
str(len\n (clean_list)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n print('CLEAN_TERMS, DELETE', cont, len(clean_list), elapsed_time)\n return clean_list\n\n\ndef annotate_timex(text, date, lang):\n f = open('texto.txt', 'w')\n f.write(text)\n textanotador2 = ''\n start_time = time()\n url = 'https://annotador.oeg.fi.upm.es/annotate'\n params = ('{\"inputText\":\"' + text +\n '\",\"inputDate\":\"\",\"domain\":\"legal\",\"lan\":\"' + lang +\n '\",\"format\":\"timex3\"}')\n headers = {'Content-Type': 'application/json;charset=utf-8'}\n response = requests.request('POST', url, headers=headers, data=params.\n encode('utf8'))\n textanotador = response.text\n print('ENTRA ANOTADOR')\n print(textanotador)\n code = response.status_code\n list_anotador = textanotador.split('|')\n print(list_anotador)\n deletes = []\n cont = 0\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n print(i)\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n anotador = []\n for i in list_anotador:\n anotador.append(i.strip().replace(',', ''))\n if code != 200:\n print(\n 'WARNING: Annotador is down. Temporal expressions could not be removed.'\n )\n anotador = text.split('| ')\n conts_log.error(\n 'Annotador is down. 
Temporal expressions could not be removed.',\n code)\n else:\n elapsed_time = time() - start_time\n txt = 'AÑOTADOR, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + str(len\n (anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('AÑOTADOR DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef infinitive(verb):\n if verb[-2:] == 'ar' or verb[-2:] == 'er' or verb[-2:] == 'ir':\n verb = verb\n else:\n if verb[-2:] == 'rá':\n verb = verb[:-1]\n if verb[-2:] == 'án':\n verb = verb[:-2]\n if verb[-2:] == 'ré':\n verb = verb[:-1]\n return verb\n\n\ndef delete_pattern(anotador, pos_tagger):\n total = 0\n deletes = []\n start_time = time()\n lemmas_list = []\n cont = 0\n cont_inf = 0\n cont_post = 0\n for i in anotador:\n print('this is i')\n print(i)\n if len(i) > 1:\n doc = pos_tagger(i)\n sent = doc.sentences[0]\n word = sent.words\n tag = []\n for token in word:\n pos = token.upos\n term = token.text\n tupla = term, pos\n tag.append(tupla)\n print(token.text)\n print(pos)\n print('this is tag ')\n print(tag)\n total = total + 1\n joini = i\n list_pos = []\n spl = joini.split(' ')\n if joini != '':\n join_tag = ''\n for t in tag:\n print('this is t')\n print(t)\n if t[1] == 'AUX':\n doc = nlp(t[0])\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('aux--' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if t[1] == 'NOUN':\n list_pos.append('noun-' + str(t[0]))\n if t[1] == 'VERB':\n cont_inf = cont_inf + 1\n doc = nlp(t[0])\n for tok in doc:\n l = tok.lemma_\n if l != t[0]:\n cont_post = cont_post + 1\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('verb-' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if 
t[1] == 'ADV':\n list_pos.append('adv--' + str(t[0]))\n if t[1] == 'ADJ':\n list_pos.append('adj--' + str(t[0]))\n if t[1] == 'SCONJ':\n list_pos.append('sconj' + str(t[0]))\n spl_i = joini.split(' ')\n if len(list_pos) == 1:\n pos1 = list_pos[0]\n if pos1[0:4] == 'adv-':\n term = pos1[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 2 and len(spl_i) == 2:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n term = ''\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = 
anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 3 and len(spl_i) == 3:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n pos3 = list_pos[2]\n term = ''\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n 
deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in 
anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'scon' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'scon' and pos3[0:4\n ] == 'adv-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n for i in deletes:\n if i in anotador:\n ind = anotador.index(i)\n 
anotador.pop(ind)\n elapsed_time = time() - start_time\n txt = 'PATRONES, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(\n len(anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PATRONES DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef quit_plural(valuelist):\n start_time = time()\n file = open('./data/numberlist_es', 'r', encoding='utf-8')\n read = file.readlines()\n plural = []\n cont = 0\n for i in valuelist:\n ind = valuelist.index(i)\n term = i.replace(',', '').replace('-', ' ')\n valuelist[ind] = term\n plu = ''\n if 'es' in term[-2:] or 's' in term[-1:]:\n slp = term.split(' ')\n for n in read:\n if n[:-1] in slp:\n plu = i\n if not len(plu):\n for j in slp:\n if 'es' in j[-2:] and 't' not in j[-3:-2] and 'l' not in j[\n -3:-2] or 'les' in j[-3:]:\n plu += ' ' + j[:-2]\n if 'on' in plu[-2:]:\n plu = ' ' + plu[:-2] + 'ón'\n if 'v' in plu[-1:]:\n plu = ' ' + plu + 'e'\n if 'bl' in plu[-2:]:\n plu = ' ' + plu + 'e'\n if 'br' in plu[-2:]:\n plu = ' ' + plu + 'e'\n elif 's' in j[-1:]:\n plu += ' ' + j[:-1]\n pos = slp.index(j)\n if pos > 0:\n bef = slp[0]\n if 'n' in bef[-1:] and 'ón' not in bef[-2:]:\n splb = plu.split(' ')\n firts = splb[1]\n if 'n' not in firts[-1:]:\n pass\n else:\n plu0 = firts[:-1]\n join1 = ' '.join(splb[2:])\n plu = plu0 + ' ' + join1\n else:\n plu += ' ' + j\n ind = valuelist.index(term)\n valuelist[ind] = plu.strip()\n cont = cont + 1\n quit_plu = []\n nuevalista = set(valuelist)\n for i in nuevalista:\n quit_plu.append(i)\n deletes = []\n new = []\n for i in valuelist:\n if i not in new:\n new.append(i)\n else:\n deletes.append(i)\n elapsed_time = time() - start_time\n txt = 'PLURAL, DELETE' + ' (' + str(len(valuelist) - len(quit_plu)\n ) + ') NEW LIST SIZE: (' + str(len(quit_plu)) + ') TIME: (' + str(\n elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PLURALES DELETE', len(valuelist) - len(quit_plu), 
len(quit_plu),\n elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return quit_plu\n\n\n<mask token>\n\n\ndef readFile(read):\n start_time = time()\n text = ''\n for i in read:\n if i[-1:] == '\\n':\n spl = i[:-1].split('\\t')\n else:\n spl = i.split('\\t')\n term = spl[1].replace('-', '').replace(',', '').replace(';', '')\n spl2 = term.split(' ')\n text += '| ' + spl[1]\n elapsed_time = time() - start_time\n return text\n\n\ndef quit_tilds(s):\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú', 'u')\n for a, b in replacements:\n s = s.replace(a, b)\n return s\n\n\ndef acentos(last):\n start_time = time()\n til = []\n list_acentos = []\n for i in last:\n acento = re.search('[áéíóúÁÉÍÓÚ]+', i)\n if acento != None:\n sin = quit_tilds(i)\n list_acentos.append(i)\n til.append(sin)\n else:\n til.append(i)\n til2 = []\n delete = []\n for i in til:\n if i not in til2:\n til2.append(i)\n else:\n delete.append(i)\n indices = []\n delete2 = []\n for i in last:\n if i in delete and i not in indices:\n indices.append(i)\n delete2.append(i)\n for i in delete2:\n ind = last.index(i)\n last.pop(ind)\n last.sort()\n elapsed_time = time() - start_time\n return last\n\n\ndef main(read, lang_in):\n start_time = time()\n text = readFile(read)\n date = '2020-06-03'\n lang = lang_in\n termlist = text.split('| ')\n print('RECIBE', termlist)\n clean_text = clean_terms(termlist, lang_in)\n join_clean_text = '| '.join(clean_text).replace('-', '').replace(',', ''\n ).replace(';', '')\n anotador = annotate_timex(join_clean_text, date, lang)\n anotador.sort()\n if lang_in == 'es':\n pattern = delete_pattern(anotador)\n plural = quit_plural(pattern)\n numbers = delete_numbers(plural)\n tildes = acentos(numbers)\n stop2 = clean_terms(tildes, lang_in)\n print('FINALES', stop2)\n \"\"\"new=open('../data/clean_terms_freq4.txt', 'w')#se imprime lo que se queda\n\n\tfor i in stop2:\n\t 
new.write(i+'\n')\n\tnew.close()\n\telapsed_time=time()-start_time\n\tprint('Main', elapsed_time)\n\treturn(stop2)\"\"\"\n",
"step-4": "import os\nimport json\nimport csv\nimport re\nimport requests\nimport spacy\nimport nltk\nfrom nltk.parse import CoreNLPParser\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nstemmer = PorterStemmer()\nfrom time import time\nnlp = spacy.load('es_core_news_sm')\nfrom modules_api import conts_log\nsw_spanish = './data/stop-esp.txt'\nsw_english = './data/stop-eng.txt'\ninner_spanish = './data/inner-stop-esp.txt'\ninner_english = './data/inner-stop-eng.txt'\nimport stanza\n<mask token>\n\n\ndef preprocessing_terms(termlist, lang_in, timeEx, patternBasedClean,\n pluralClean, numbersClean, accentClean):\n date = '2020-06-03'\n print('terms:', termlist)\n print('lang:', lang_in)\n processedTerms = clean_terms(termlist, lang_in)\n print('This is processedTerms ')\n print(processedTerms)\n if timeEx == True:\n processedTerms = '| '.join(processedTerms).replace('-', '').replace(','\n , '').replace(';', '')\n processedTerms = annotate_timex(processedTerms, date, lang_in)\n processedTerms.sort()\n if lang_in == 'es' and patternBasedClean == True:\n stanza.download('es')\n pos_tagger = stanza.Pipeline('es')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'en' and patternBasedClean == True:\n stanza.download('en')\n pos_tagger = stanza.Pipeline('en')\n processedTerms = delete_pattern(processedTerms, pos_tagger)\n if lang_in == 'es' and pluralClean == True:\n processedTerms = quit_plural(processedTerms)\n if numbersClean == True:\n processedTerms = delete_numbers(processedTerms)\n if accentClean == True:\n processedTerms = acentos(processedTerms)\n processedTerms = clean_terms(processedTerms, lang_in)\n return processedTerms\n\n\ndef clean_terms(termlist, lang_in):\n start_time = time()\n if lang_in == 'es':\n stop = stopwords.words('spanish')\n file = open(sw_spanish, 'r', encoding='utf-8')\n mystop = file.readlines()\n elif lang_in == 'en':\n stop = stopwords.words('english')\n file = open(sw_english, 'r', 
encoding='utf-8')\n mystop = file.readlines()\n clean_list = []\n cont = 0\n for i in mystop:\n stop.append(i.strip())\n deletes = []\n for i in termlist:\n k = i.strip(',.:')\n if k.lower() in stop or k in stop:\n deletes.append(k)\n elif k.lower() not in stop or k not in stop:\n clean_list.append(k.replace(',', '').replace('-', ''))\n print(deletes)\n cont = len(termlist) - len(clean_list)\n elapsed_time = time() - start_time\n txt = 'CLEAN_TERMS, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + str(len\n (clean_list)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n print('CLEAN_TERMS, DELETE', cont, len(clean_list), elapsed_time)\n return clean_list\n\n\ndef annotate_timex(text, date, lang):\n f = open('texto.txt', 'w')\n f.write(text)\n textanotador2 = ''\n start_time = time()\n url = 'https://annotador.oeg.fi.upm.es/annotate'\n params = ('{\"inputText\":\"' + text +\n '\",\"inputDate\":\"\",\"domain\":\"legal\",\"lan\":\"' + lang +\n '\",\"format\":\"timex3\"}')\n headers = {'Content-Type': 'application/json;charset=utf-8'}\n response = requests.request('POST', url, headers=headers, data=params.\n encode('utf8'))\n textanotador = response.text\n print('ENTRA ANOTADOR')\n print(textanotador)\n code = response.status_code\n list_anotador = textanotador.split('|')\n print(list_anotador)\n deletes = []\n cont = 0\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n for i in list_anotador:\n if '<' in i and len(i) > 2:\n print(i)\n cont = cont + 1\n deletes.append(i)\n ind = list_anotador.index(i)\n list_anotador.pop(ind)\n anotador = []\n for i in list_anotador:\n anotador.append(i.strip().replace(',', ''))\n if code != 200:\n print(\n 'WARNING: Annotador is down. Temporal expressions could not be removed.'\n )\n anotador = text.split('| ')\n conts_log.error(\n 'Annotador is down. 
Temporal expressions could not be removed.',\n code)\n else:\n elapsed_time = time() - start_time\n txt = 'AÑOTADOR, DELETE (' + str(cont) + ') NEW LIST SIZE: (' + str(len\n (anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('AÑOTADOR DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef infinitive(verb):\n if verb[-2:] == 'ar' or verb[-2:] == 'er' or verb[-2:] == 'ir':\n verb = verb\n else:\n if verb[-2:] == 'rá':\n verb = verb[:-1]\n if verb[-2:] == 'án':\n verb = verb[:-2]\n if verb[-2:] == 'ré':\n verb = verb[:-1]\n return verb\n\n\ndef delete_pattern(anotador, pos_tagger):\n total = 0\n deletes = []\n start_time = time()\n lemmas_list = []\n cont = 0\n cont_inf = 0\n cont_post = 0\n for i in anotador:\n print('this is i')\n print(i)\n if len(i) > 1:\n doc = pos_tagger(i)\n sent = doc.sentences[0]\n word = sent.words\n tag = []\n for token in word:\n pos = token.upos\n term = token.text\n tupla = term, pos\n tag.append(tupla)\n print(token.text)\n print(pos)\n print('this is tag ')\n print(tag)\n total = total + 1\n joini = i\n list_pos = []\n spl = joini.split(' ')\n if joini != '':\n join_tag = ''\n for t in tag:\n print('this is t')\n print(t)\n if t[1] == 'AUX':\n doc = nlp(t[0])\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('aux--' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if t[1] == 'NOUN':\n list_pos.append('noun-' + str(t[0]))\n if t[1] == 'VERB':\n cont_inf = cont_inf + 1\n doc = nlp(t[0])\n for tok in doc:\n l = tok.lemma_\n if l != t[0]:\n cont_post = cont_post + 1\n lemlist = [tok.lemma_ for tok in doc]\n lem = ''.join(lemlist)\n lemmas_list.append(lem)\n if lem == i:\n lem = t[0]\n list_pos.append('verb-' + str(lem))\n if len(spl) == 1:\n ind = anotador.index(str(i))\n anotador[ind] = str(lem)\n if 
t[1] == 'ADV':\n list_pos.append('adv--' + str(t[0]))\n if t[1] == 'ADJ':\n list_pos.append('adj--' + str(t[0]))\n if t[1] == 'SCONJ':\n list_pos.append('sconj' + str(t[0]))\n spl_i = joini.split(' ')\n if len(list_pos) == 1:\n pos1 = list_pos[0]\n if pos1[0:4] == 'adv-':\n term = pos1[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 2 and len(spl_i) == 2:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n term = ''\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = 
anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adv-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'adv-':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n elif len(list_pos) == 3 and len(spl_i) == 3:\n pos1 = list_pos[0]\n pos2 = list_pos[1]\n pos3 = list_pos[2]\n term = ''\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n 
deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'verb':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'aux-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'adj-':\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'noun' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in 
anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'aux-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'noun' and pos2[0:4] == 'adv-' and pos3[0:4\n ] == 'scon' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'scon' and pos3[0:4\n ] == 'adv-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'aux-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'verb' and pos2[0:4] == 'verb' and pos3[0:4\n ] == 'verb' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n if pos1[0:4] == 'adj-' and pos2[0:4] == 'noun' and pos3[0:4\n ] == 'adj-' and joini in anotador:\n term = pos1[5:] + ' ' + pos2[5:] + ' ' + pos3[5:]\n deletes.append(joini)\n ind = anotador.index(joini)\n cont = cont + 1\n for i in deletes:\n if i in anotador:\n ind = anotador.index(i)\n 
anotador.pop(ind)\n elapsed_time = time() - start_time\n txt = 'PATRONES, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(\n len(anotador)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PATRONES DELETE', cont, len(anotador), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return anotador\n\n\ndef quit_plural(valuelist):\n start_time = time()\n file = open('./data/numberlist_es', 'r', encoding='utf-8')\n read = file.readlines()\n plural = []\n cont = 0\n for i in valuelist:\n ind = valuelist.index(i)\n term = i.replace(',', '').replace('-', ' ')\n valuelist[ind] = term\n plu = ''\n if 'es' in term[-2:] or 's' in term[-1:]:\n slp = term.split(' ')\n for n in read:\n if n[:-1] in slp:\n plu = i\n if not len(plu):\n for j in slp:\n if 'es' in j[-2:] and 't' not in j[-3:-2] and 'l' not in j[\n -3:-2] or 'les' in j[-3:]:\n plu += ' ' + j[:-2]\n if 'on' in plu[-2:]:\n plu = ' ' + plu[:-2] + 'ón'\n if 'v' in plu[-1:]:\n plu = ' ' + plu + 'e'\n if 'bl' in plu[-2:]:\n plu = ' ' + plu + 'e'\n if 'br' in plu[-2:]:\n plu = ' ' + plu + 'e'\n elif 's' in j[-1:]:\n plu += ' ' + j[:-1]\n pos = slp.index(j)\n if pos > 0:\n bef = slp[0]\n if 'n' in bef[-1:] and 'ón' not in bef[-2:]:\n splb = plu.split(' ')\n firts = splb[1]\n if 'n' not in firts[-1:]:\n pass\n else:\n plu0 = firts[:-1]\n join1 = ' '.join(splb[2:])\n plu = plu0 + ' ' + join1\n else:\n plu += ' ' + j\n ind = valuelist.index(term)\n valuelist[ind] = plu.strip()\n cont = cont + 1\n quit_plu = []\n nuevalista = set(valuelist)\n for i in nuevalista:\n quit_plu.append(i)\n deletes = []\n new = []\n for i in valuelist:\n if i not in new:\n new.append(i)\n else:\n deletes.append(i)\n elapsed_time = time() - start_time\n txt = 'PLURAL, DELETE' + ' (' + str(len(valuelist) - len(quit_plu)\n ) + ') NEW LIST SIZE: (' + str(len(quit_plu)) + ') TIME: (' + str(\n elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('PLURALES DELETE', len(valuelist) - len(quit_plu), 
len(quit_plu),\n elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return quit_plu\n\n\ndef delete_numbers(list_):\n start_time = time()\n file = open('./data/numberlist_es', 'r', encoding='utf-8')\n read = file.readlines()\n cont = 0\n deletes = []\n for i in read:\n if i[-1:] == '\\n':\n i = i[:-1]\n for j in list_:\n if ' ' + i + ' ' in ' ' + j + ' ':\n deletes.append(j)\n ind = list_.index(j)\n cont = cont + 1\n list_.pop(ind)\n elapsed_time = time() - start_time\n txt = 'NUMBERS, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(len\n (list_)) + ') TIME: (' + str(elapsed_time) + ')'\n joind = ', '.join(deletes)\n print('NUMEROS DELETE', cont, len(list_), elapsed_time)\n conts_log.information(txt, 'TERMS REMOVED: ' + joind)\n return list_\n\n\ndef readFile(read):\n start_time = time()\n text = ''\n for i in read:\n if i[-1:] == '\\n':\n spl = i[:-1].split('\\t')\n else:\n spl = i.split('\\t')\n term = spl[1].replace('-', '').replace(',', '').replace(';', '')\n spl2 = term.split(' ')\n text += '| ' + spl[1]\n elapsed_time = time() - start_time\n return text\n\n\ndef quit_tilds(s):\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú', 'u')\n for a, b in replacements:\n s = s.replace(a, b)\n return s\n\n\ndef acentos(last):\n start_time = time()\n til = []\n list_acentos = []\n for i in last:\n acento = re.search('[áéíóúÁÉÍÓÚ]+', i)\n if acento != None:\n sin = quit_tilds(i)\n list_acentos.append(i)\n til.append(sin)\n else:\n til.append(i)\n til2 = []\n delete = []\n for i in til:\n if i not in til2:\n til2.append(i)\n else:\n delete.append(i)\n indices = []\n delete2 = []\n for i in last:\n if i in delete and i not in indices:\n indices.append(i)\n delete2.append(i)\n for i in delete2:\n ind = last.index(i)\n last.pop(ind)\n last.sort()\n elapsed_time = time() - start_time\n return last\n\n\ndef main(read, lang_in):\n start_time = time()\n text = readFile(read)\n date = '2020-06-03'\n lang = lang_in\n termlist = 
text.split('| ')\n print('RECIBE', termlist)\n clean_text = clean_terms(termlist, lang_in)\n join_clean_text = '| '.join(clean_text).replace('-', '').replace(',', ''\n ).replace(';', '')\n anotador = annotate_timex(join_clean_text, date, lang)\n anotador.sort()\n if lang_in == 'es':\n pattern = delete_pattern(anotador)\n plural = quit_plural(pattern)\n numbers = delete_numbers(plural)\n tildes = acentos(numbers)\n stop2 = clean_terms(tildes, lang_in)\n print('FINALES', stop2)\n \"\"\"new=open('../data/clean_terms_freq4.txt', 'w')#se imprime lo que se queda\n\n\tfor i in stop2:\n\t new.write(i+'\n')\n\tnew.close()\n\telapsed_time=time()-start_time\n\tprint('Main', elapsed_time)\n\treturn(stop2)\"\"\"\n",
"step-5": "import os\nimport json\nimport csv\nimport re\nimport requests\nimport spacy\nimport nltk\nfrom nltk.parse import CoreNLPParser\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nstemmer = PorterStemmer()\nfrom time import time\nnlp = spacy.load('es_core_news_sm')\nfrom modules_api import conts_log\nsw_spanish=\"./data/stop-esp.txt\"\nsw_english=\"./data/stop-eng.txt\"\ninner_spanish=\"./data/inner-stop-esp.txt\"\ninner_english=\"./data/inner-stop-eng.txt\"\nimport stanza\n\n\n### METODO PARA EL SERVICIO\n'''\ncomo el main de debajo. este método va a ser el controlador.\nMediante parámetros va a decidir qué procesos va a seguir\n\ntermList: array/lista de terminos\nlang: string con el idoma : es, en \n\n\ntimeEx: booleano que activa si se aplica timex o no\npatternBasedClean: booleano que activa si se aplican patrones o no\npluralClean: booleano que activa si se aplica limpieza de plurales o no\nnumbersClean: booleano que activa si se aplica limpieza de numeros o no\naccentClean: booleano que activa si se aplica limpieza de acentos o no\n\n\n'''\ndef preprocessing_terms(termlist, lang_in, timeEx, patternBasedClean, pluralClean, numbersClean, accentClean):\n \n date='2020-06-03' # esto debería ser automatico\n print('terms:', termlist)\n print('lang:', lang_in)\n \n # servicio básico, creo que se debería hacer siempre\n processedTerms=clean_terms(termlist, lang_in)\n \n \n \n print('This is processedTerms ')\n print(processedTerms)\n \n \n #print('this is timex' + timeEx)\n # Todo siempre sobre la misma variable: processedTerms. Da igual el camino que cojas. 
Usas la lista de terminos y se modifica.\n \n #opcional\n if(timeEx==True):\n processedTerms='| '.join(processedTerms).replace('-', '').replace(',', '').replace(';', '')\n processedTerms=annotate_timex(processedTerms, date, lang_in)\n processedTerms.sort()\n #opcional \n if((lang_in=='es') and (patternBasedClean==True)):\n stanza.download('es')\n pos_tagger=stanza.Pipeline('es')\n processedTerms=delete_pattern(processedTerms, pos_tagger)\n if((lang_in=='en') and (patternBasedClean==True)):\n stanza.download('en')\n pos_tagger=stanza.Pipeline('en')\n processedTerms=delete_pattern(processedTerms, pos_tagger)\n #opcional \n if((lang_in=='es') and (pluralClean==True)):\n processedTerms=quit_plural(processedTerms)\n #opcional\n if(numbersClean==True):\n processedTerms=delete_numbers(processedTerms)\n #opcional\n if(accentClean==True): \n processedTerms=acentos(processedTerms)\n #final clean \n processedTerms=clean_terms(processedTerms, lang_in)\n \n #devolvemos los terminos\n return processedTerms\n\n\n\n\n# 0 clean punctuation and stopwords\ndef clean_terms(termlist, lang_in):\n \n start_time=time()\n if(lang_in==\"es\"):\n \tstop=stopwords.words('spanish')\n \tfile=open(sw_spanish, 'r', encoding='utf-8')\n \tmystop=file.readlines()\n elif(lang_in==\"en\"):\n \tstop=stopwords.words('english')\n \tfile=open(sw_english, 'r', encoding='utf-8')\n \tmystop=file.readlines()\n\n \n clean_list = []\n cont=0\n for i in mystop:\n #print(i.strip())\n stop.append(i.strip())\n\n #print(stop)\n deletes=[]\n for i in termlist:\n k=i.strip(',.:')\n # print(k)\n if ((k.lower() in stop) or (k in stop)):\n \tdeletes.append(k)\n elif ((k.lower() not in stop) or (k not in stop)):\n clean_list.append(k.replace(',', '').replace('-', ''))\n\n print(deletes)\n cont=len(termlist)-len(clean_list)\n elapsed_time=time()-start_time\n\n txt='CLEAN_TERMS, DELETE ('+str(cont)+') NEW LIST SIZE: ('+str(len(clean_list))+') TIME: ('+str(elapsed_time)+')'\n joind=', '.join(deletes)\n 
conts_log.information(txt, 'TERMS REMOVED: '+joind)\n print('CLEAN_TERMS, DELETE', cont, len(clean_list), elapsed_time )\n \n \n return(clean_list)\n\n\n# 1 añotador\ndef annotate_timex(text, date, lang):\n \n f=open('texto.txt', 'w')\n f.write(text)\n textanotador2=''\n start_time=time()\n\n url = 'https://annotador.oeg.fi.upm.es/annotate' \n params = \"{\\\"inputText\\\":\\\"\"+text+\"\\\",\\\"inputDate\\\":\\\"\\\",\\\"domain\\\":\\\"legal\\\",\\\"lan\\\":\\\"\"+lang+\"\\\",\\\"format\\\":\\\"timex3\\\"}\"\n headers = {\n\t\t \t\t'Content-Type': 'application/json;charset=utf-8'\n\t}\n #response=requests.post(url, data=params)\n response=requests.request(\"POST\", url, headers=headers, data = params.encode('utf8'))\n textanotador=response.text\n print('ENTRA ANOTADOR')\n print(textanotador)\n\n code=response.status_code\n list_anotador=textanotador.split('|')\n print(list_anotador)\n \n deletes=[]\n cont=0\n for i in list_anotador:\n if('<' in i and len(i)>2):\n cont=cont+1\n deletes.append(i)\n ind=list_anotador.index(i)\n list_anotador.pop(ind)\n for i in list_anotador:\n if('<' in i and len(i)>2):\n print(i)\n cont=cont+1\n deletes.append(i)\n ind=list_anotador.index(i)\n list_anotador.pop(ind)\n \n \n anotador=[]\n for i in list_anotador:\n anotador.append(i.strip().replace(',', ''))\n \n\n if(code!=200):\n\t print('WARNING: Annotador is down. Temporal expressions could not be removed.' )\n\t anotador=text.split('| ')\n\t conts_log.error('Annotador is down. 
Temporal expressions could not be removed.', code)\n else:\n\t elapsed_time=time()-start_time\n\t txt='AÑOTADOR, DELETE ('+str(cont)+') NEW LIST SIZE: ('+str(len(anotador))+') TIME: ('+str(elapsed_time)+')'\n\t joind=', '.join(deletes)\n\t print('AÑOTADOR DELETE', cont, len(anotador), elapsed_time )\n\t conts_log.information(txt, 'TERMS REMOVED: '+joind)\n \n return(anotador)\n\n\n\n\n\ndef infinitive(verb):\n\t\n\tif(verb[-2:]=='ar' or verb[-2:]=='er' or verb[-2:]=='ir'):\n\t\tverb=verb\n\telse:\n\t\tif(verb[-2:]=='rá' ):\n\t\t\t#print('---',verb,'-',verb[:-1])\n\t\t\tverb=verb[:-1]\n\t\tif(verb[-2:]=='án'):\n\t\t\t#print('---',verb,'-',verb[:-2])\n\t\t\tverb=verb[:-2]\n\t\tif(verb[-2:]=='ré'):\n\t\t\t#print('---',verb,'-',verb[:-1])\n\t\t\tverb=verb[:-1]\n\treturn (verb)\n\n\n# 2.1 patrones es\ndef delete_pattern(anotador, pos_tagger):\n\ttotal=0\n\tdeletes=[]\n\tstart_time=time()\n\tlemmas_list=[]\n\tcont=0\n\tcont_inf=0\n\tcont_post=0\n\tfor i in anotador:\n\t\tprint('this is i')\n\t\tprint(i) \n\t\tif(len(i)>1):\n\t\t\t#print( i, i.split(' ') )\n\t\t\t#pos_tagger = CoreNLPParser('https://corenlp.run/', tagtype='pos')\n #si se cae el de lynx, probar con este https://corenlp.run/\n\t\t\t#print(i)\n\t\t\tdoc=pos_tagger(i)\n\t\t\t#print(doc)\n\t\t\tsent=doc.sentences[0]\n\t\t\tword=sent.words\n\t\t\ttag=[]\n\t\t\tfor token in word:\n\t\t\t\tpos=token.upos\n\t\t\t\tterm=token.text\n\t\t\t\ttupla=(term, pos)\n\t\t\t\ttag.append(tupla)\n\t\t\t\tprint(token.text)\n\t\t\t\tprint(pos)\n\t\t\t#tag=pos_tagger.tag(i.split(' '))\n\t\t\tprint('this is tag ') \n\t\t\tprint(tag)\n\t\t\ttotal=total+1\n\t\t\tjoini=i\n\t\t\tlist_pos=[]\n\t\t\tspl=joini.split(' ')\n\t\t\tif(joini!=''):\n\t\t\t\tjoin_tag=''\n\t\t\t\tfor t in tag:\n\t\t\t\t\tprint('this is t') \n\t\t\t\t\tprint(t)\n\t\t\t\t\tif(t[1] == 'AUX' ):\n\t\t\t\t\t\tdoc=nlp(t[0])\n\t\t\t\t\t\tlemlist=[tok.lemma_ for tok in 
doc]\n\t\t\t\t\t\tlem=''.join(lemlist)\n\t\t\t\t\t\tlemmas_list.append(lem)\n\t\t\t\t\t\tif(lem==i):\n\t\t\t\t\t\t\tlem=t[0]\n\t\t\t\t\t\tlist_pos.append('aux--'+str(lem))\n\t\t\t\t\t\tif(len(spl)==1):\n\t\t\t\t\t\t\tind=anotador.index(str(i))\n\t\t\t\t\t\t\tanotador[ind]=str(lem)\n\t\t\t\t\tif(t[1] == 'NOUN'):\n\t\t\t\t\t\tlist_pos.append('noun-'+str(t[0]))\n\t\t\t\t\tif(t[1] == 'VERB'):\n\t\t\t\t\t\tcont_inf=cont_inf+1\n\t\t\t\t\t\tdoc=nlp(t[0])\n\t\t\t\t\t\tfor tok in doc:\n\t\t\t\t\t\t\tl=tok.lemma_\n\t\t\t\t\t\t\tif(l!=t[0]):\n\t\t\t\t\t\t\t\tcont_post=cont_post+1\n\t\t\t\t\t\tlemlist=[tok.lemma_ for tok in doc]\n\t\t\t\t\t\tlem=''.join(lemlist)\n\t\t\t\t\t\tlemmas_list.append(lem)\n\t\t\t\t\t\tif(lem==i):\n\t\t\t\t\t\t\tlem=t[0]\n\t\t\t\t\t\tlist_pos.append('verb-'+str(lem))\n\t\t\t\t\t\tif(len(spl)==1):\n\t\t\t\t\t\t\tind=anotador.index(str(i))\n\t\t\t\t\t\t\tanotador[ind]=str(lem)\n\t\t\t\t\tif(t[1] == 'ADV'):\n\t\t\t\t\t\tlist_pos.append('adv--'+str(t[0]))\n\t\t\t\t\tif(t[1] == 'ADJ'):\n\t\t\t\t\t\tlist_pos.append('adj--'+str(t[0]))\n\t\t\t\t\tif(t[1] == 'SCONJ'):\n\t\t\t\t\t\tlist_pos.append('sconj'+str(t[0]))\n\t\t\t\t\n\t\t\t\tspl_i=joini.split(' ')\n\t\t\t\t\n\t\t\t\tif(len(list_pos)==1):\n\t\t\t\t\tpos1=list_pos[0]\n\t\t\t\t\tif(pos1[0:4]=='adv-' ):\n\t\t\t\t\t\tterm=pos1[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\n\t\t\t\telif(len(list_pos)==2 and len(spl_i)==2):\n\t\t\t\t\tpos1=list_pos[0]\n\t\t\t\t\tpos2=list_pos[1]\n\t\t\t\t\tterm=''\n\t\t\t\t\tif(pos1[0:4]=='aux-' and pos2[0:4]=='verb'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and pos2[0:4]=='aux-'):\n\t\t\t\t\t\tterm=pos1[5:]+' 
'+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and pos2[0:4]=='verb'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='verb'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='aux-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adv-' and pos2[0:4]=='adj-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adj-' and pos2[0:4]=='adv-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adv-' and pos2[0:4]=='aux-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='aux-' and pos2[0:4]=='adv-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adv-' and pos2[0:4]=='verb'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and 
pos2[0:4]=='aux-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='adv-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adv-' and pos2[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and pos2[0:4]=='adv-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and pos2[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='aux-' and pos2[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adj-' and pos2[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\n\t\t\t\telif(len(list_pos)==3 and len(spl_i)==3):\n\t\t\t\t\t#print(list_pos, spl_i,'-', len(list_pos), len(spl_i))\n\t\t\t\t\tpos1=list_pos[0]\n\t\t\t\t\tpos2=list_pos[1]\n\t\t\t\t\tpos3=list_pos[2]\n\t\t\t\t\tterm=''\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='verb' and pos3[0:4]=='verb'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' 
'+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='aux-' and pos3[0:4]=='verb'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='aux-' and pos3[0:4]=='aux-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='verb' and pos3[0:4]=='aux-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\t\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='verb' and pos3[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='aux-' and pos3[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and pos2[0:4]=='noun' and pos3[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='noun' and pos3[0:4]=='verb'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' 
'+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='aux-' and pos2[0:4]=='noun' and pos3[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='noun' and pos3[0:4]=='aux-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='aux-' and pos2[0:4]=='verb' and pos3[0:4]=='noun'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='verb' and pos3[0:4]=='adj-'):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='verb' and pos3[0:4]=='noun' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and pos2[0:4]=='noun' and pos3[0:4]=='adj-' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='aux-' and pos3[0:4]=='adj-' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' 
'+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='adv-' and pos3[0:4]=='adj-' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adj-' and pos2[0:4]=='adv-' and pos3[0:4]=='adj-' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='noun' and pos2[0:4]=='adv-' and pos3[0:4]=='scon' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adj-' and pos2[0:4]=='scon' and pos3[0:4]=='adv-' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='aux-' and pos2[0:4]=='noun' and pos3[0:4]=='adj-' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='verb' and pos2[0:4]=='verb' and pos3[0:4]=='verb' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' '+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\t\t\t\t\tif(pos1[0:4]=='adj-' and pos2[0:4]=='noun' and pos3[0:4]=='adj-' and joini in anotador):\n\t\t\t\t\t\tterm=pos1[5:]+' '+pos2[5:]+' 
'+pos3[5:]\n\t\t\t\t\t\tdeletes.append(joini)\n\t\t\t\t\t\tind=anotador.index(joini)\n\t\t\t\t\t\t#anotador.pop(ind)\n\t\t\t\t\t\tcont=cont+1\n\n\tfor i in deletes:\n\t\tif(i in anotador):\n\t\t\tind=anotador.index(i)\n\t\t\tanotador.pop(ind)\n\t\t\t\n\t\n\telapsed_time=time()-start_time\n\ttxt='PATRONES, DELETE'+' ('+str(cont)+') NEW LIST SIZE: ('+str(len(anotador))+') TIME: ('+str(elapsed_time)+')'\n\tjoind=', '.join(deletes)\n\tprint('PATRONES DELETE', cont, len(anotador), elapsed_time)\n\tconts_log.information(txt, 'TERMS REMOVED: '+joind)\n\treturn(anotador)\n\n\n\n\n# 3 plurales\ndef quit_plural(valuelist):\n\tstart_time=time()\n\tfile=open('./data/numberlist_es', 'r', encoding='utf-8')\n\tread=file.readlines()\n\tplural=[]\n\tcont=0\n\tfor i in valuelist:\n\t\tind=valuelist.index(i)\n\t\tterm=i.replace(',', '').replace('-', ' ')\n\t\tvaluelist[ind]=term\n\t\tplu=''\n\t\tif('es' in term[-2:] or 's' in term[-1:]):\n\t\t\tslp=term.split(' ')\n\n\t\t\tfor n in read:\n\t\t\t\tif(n[:-1] in slp):\n\t\t\t\t\tplu=i\n\n\t\t\tif not len(plu):\n\t\t\t\tfor j in slp:\n\t\t\t\t\tif( ('es' in j[-2:] ) and 't' not in j[-3:-2] and 'l' not in j[-3:-2] or ('les' in j[-3:] ) ):\n\t\t\t\t\t\tplu+=' '+j[:-2]\n\t\t\t\t\t\t\n\t\t\t\t\t\tif('on' in plu[-2:]):\n\t\t\t\t\t\t\tplu=' '+plu[:-2]+'ón'\n\t\t\t\t\t\tif('v' in plu[-1:]):\n\t\t\t\t\t\t\tplu=' '+plu+'e'\n\t\t\t\t\t\tif('bl' in plu[-2:]):\n\t\t\t\t\t\t\tplu=' '+plu+'e'\n\t\t\t\t\t\tif('br' in plu[-2:]):\n\t\t\t\t\t\t\tplu=' '+plu+'e'\n\n\t\t\t\t\telif(('s' in j[-1:]) ):\n\t\t\t\t\t\tplu+=' '+j[:-1]\n\t\t\t\t\t\tpos=slp.index(j)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif(pos>0):\n\t\t\t\t\t\t\tbef=slp[0]\n\t\t\t\t\t\t\tif('n' in bef[-1:] and 'ón' not in bef[-2:]):\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tsplb=plu.split(' ')\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tfirts=splb[1]\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif('n' not in firts[-1:]):\n\t\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tplu0=firts[:-1]\n\t\t\t\t\t\t\t\t\tjoin1=' 
'.join(splb[2:])\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tplu=plu0+' '+join1\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tplu+=' '+j\n\n\t\t\tind=valuelist.index(term)\n\t\t\tvaluelist[ind]=plu.strip()\t\t\t\n\t\t\tcont=cont+1\n\tquit_plu=[]\n\tnuevalista=set(valuelist)\n\tfor i in nuevalista:\n\t\tquit_plu.append(i)\t\n\n\tdeletes = []\n\tnew=[]\n\tfor i in valuelist:\n\t if i not in new:\n\t new.append(i)\n\t else:\n\t \tdeletes.append(i)\n\t#print('plurañes eliminadas ->', deletes)\n\telapsed_time=time()-start_time\n\ttxt='PLURAL, DELETE'+' ('+str(len(valuelist)-len(quit_plu))+') NEW LIST SIZE: ('+str(len(quit_plu))+') TIME: ('+str(elapsed_time)+')'\n\tjoind=', '.join(deletes)\n\tprint('PLURALES DELETE', len(valuelist)-len(quit_plu), len(quit_plu), elapsed_time)\n\tconts_log.information(txt, 'TERMS REMOVED: '+joind)\n\treturn(quit_plu)\n\n# 4 numeros\ndef delete_numbers(list_):\n\tstart_time=time()\n\tfile=open('./data/numberlist_es', 'r', encoding='utf-8')\n\tread=file.readlines()\n\tcont=0\n\tdeletes=[]\n\tfor i in read:\n\t\tif(i[-1:]=='\\n'):\n\t\t\ti=i[:-1]\n\t\t\tfor j in list_:\n\t\t\t\tif(' '+i+' ' in ' '+j+' ' ):\n\t\t\t\t\tdeletes.append(j)\n\t\t\t\t\tind=list_.index(j)\n\t\t\t\t\tcont=cont+1\n\t\t\t\t\tlist_.pop(ind)\n\t#list_.sort()\n\telapsed_time=time()-start_time\n\ttxt='NUMBERS, DELETE'+' ('+str(cont)+') NEW LIST SIZE: ('+str(len(list_))+') TIME: ('+str(elapsed_time)+')'\n\tjoind=', '.join(deletes)\n\tprint('NUMEROS DELETE', cont, len(list_), elapsed_time)\n\tconts_log.information(txt, 'TERMS REMOVED: '+joind)\n\treturn(list_)\n\n\n# 5 leer archivo \ndef readFile(read):\n\tstart_time=time()\n\ttext=''\n\tfor i in read:\n\t\tif(i[-1:]=='\\n'):\n\t\t\tspl=i[:-1].split('\\t')\n\t\telse:\n\t\t\tspl=i.split('\\t')\n\t\tterm=spl[1].replace('-', '').replace(',', '').replace(';', '')\n\t\tspl2=term.split(' ')\n\t\ttext+='| '+spl[1]\n\telapsed_time=time()-start_time\n\treturn text\n\n#elimina tildes\ndef quit_tilds(s):\n replacements = 
(\n (\"á\", \"a\"),\n (\"é\", \"e\"),\n (\"í\", \"i\"),\n (\"ó\", \"o\"),\n (\"ú\", \"u\"),\n )\n for a, b in replacements:\n s = s.replace(a, b)\n return s\n\ndef acentos(last):\n\tstart_time=time()\n\ttil=[]\n\tlist_acentos=[]\n\tfor i in last:\n\t\tacento=re.search(\"[áéíóúÁÉÍÓÚ]+\", i)\n\t\tif(acento!=None):\n\t\t\tsin=quit_tilds(i)\n\t\t\tlist_acentos.append(i)\n\t\t\ttil.append(sin)\n\t\telse:\n\t\t\ttil.append(i)\n\n\ttil2 = []\n\tdelete=[]\n\tfor i in til:\n\t\tif i not in til2:\n\t\t\ttil2.append(i)\n\t\telse:\n\t\t\tdelete.append(i)\n\n\tindices=[]\n\tdelete2=[]\n\tfor i in last:\n\t\tif(i in delete and i not in indices):\n\t\t\tindices.append(i)\n\t\t\tdelete2.append(i)\n\tfor i in delete2:\n\t\tind=last.index(i)\n\t\tlast.pop(ind)\n\n\tlast.sort()\n\telapsed_time=time()-start_time\n\t\n\treturn(last)\n\n\n#-------MAIN-------#\ndef main(read, lang_in):\n\tstart_time=time()\n\ttext=readFile(read)\n\tdate='2020-06-03'\n\tlang=lang_in\n\ttermlist=text.split('| ')\n\tprint('RECIBE', termlist)\n\tclean_text=clean_terms(termlist, lang_in)\n\tjoin_clean_text='| '.join(clean_text).replace('-', '').replace(',', '').replace(';', '')\n\tanotador=annotate_timex(join_clean_text, date, lang)\n\tanotador.sort()\n\tif(lang_in=='es'):\n\t\tpattern=delete_pattern(anotador)\n\t\tplural=quit_plural(pattern)\n\n\t\n\t\n\t\n\t\n\tnumbers=delete_numbers(plural)\n\n\ttildes=acentos(numbers)\n\tstop2=clean_terms(tildes, lang_in)\n\tprint('FINALES', stop2)\n\t'''new=open('../data/clean_terms_freq4.txt', 'w')#se imprime lo que se queda\n\n\tfor i in stop2:\n\t new.write(i+'\\n')\n\tnew.close()\n\telapsed_time=time()-start_time\n\tprint('Main', elapsed_time)\n\treturn(stop2)'''\n\n\n#file=open('../data/estatuto_es.txt', 'r', encoding='utf-8')\n#read=file.readlines()\n#main(read)\n",
"step-ids": [
8,
9,
10,
13,
14
]
}
|
[
8,
9,
10,
13,
14
] |
<|reserved_special_token_0|>
class Chick(Sprite):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Chick(Sprite):
def on_create(self):
self.image = 'chick-a.png'
self.goto_random_position()
self.opacity = 500
self.scale = 1
self.rotation = randint(0, 360)
for i in range(1000):
e = window.create_sprite(Chick)
e.opacity = 200
e.scale = 2
e.color = Color.RED
window.run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
window = Window()
class Chick(Sprite):
def on_create(self):
self.image = 'chick-a.png'
self.goto_random_position()
self.opacity = 500
self.scale = 1
self.rotation = randint(0, 360)
for i in range(1000):
e = window.create_sprite(Chick)
e.opacity = 200
e.scale = 2
e.color = Color.RED
window.run()
<|reserved_special_token_1|>
from pycat.base.color import Color
from pycat.sprite import Sprite
from pycat.window import Window
from pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV
from random import randint
window = Window()
class Chick(Sprite):
def on_create(self):
self.image = 'chick-a.png'
self.goto_random_position()
self.opacity = 500
self.scale = 1
self.rotation = randint(0, 360)
for i in range(1000):
e = window.create_sprite(Chick)
e.opacity = 200
e.scale = 2
e.color = Color.RED
window.run()
<|reserved_special_token_1|>
from pycat.base.color import Color
from pycat.sprite import Sprite
from pycat.window import Window
from pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV
from random import randint
window=Window()
class Chick(Sprite):
def on_create(self):
self.image = 'chick-a.png'
self.goto_random_position()
self.opacity = 500
self.scale = 1
self.rotation = randint(0, 360)
# c1 = window.create_sprite(Chick)
# c2 = window.create_sprite(Chick)
for i in range(1000):
e = window.create_sprite(Chick)
e.opacity = 200
e.scale = 2
e.color = Color.RED
window.run()
|
flexible
|
{
"blob_id": "cc7942c406e9bcb5af43f131fdf0a6441f81c16a",
"index": 4260,
"step-1": "<mask token>\n\n\nclass Chick(Sprite):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-3": "<mask token>\nwindow = Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-4": "from pycat.base.color import Color\nfrom pycat.sprite import Sprite\nfrom pycat.window import Window\nfrom pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV\nfrom random import randint\nwindow = Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-5": "from pycat.base.color import Color\nfrom pycat.sprite import Sprite\nfrom pycat.window import Window\nfrom pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV\nfrom random import randint\nwindow=Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\n# c1 = window.create_sprite(Chick)\n# c2 = window.create_sprite(Chick)\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\n\nwindow.run()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Linear(functions.Learn):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class LinearNode(functions.Node):
def __init__(self, in_size, out_size, eps):
super().__init__(Linear(eps))
self.param_name = ['w', 'b']
self.w = numpy.random.randn(out_size, in_size)
self.b = numpy.random.randn(out_size)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Linear(functions.Learn):
def implement(self, a, p):
x = a[0]
w, b0 = p
return w.dot(x) + b0
<|reserved_special_token_0|>
def request(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
return x - w.T.dot(i - b),
class LinearNode(functions.Node):
def __init__(self, in_size, out_size, eps):
super().__init__(Linear(eps))
self.param_name = ['w', 'b']
self.w = numpy.random.randn(out_size, in_size)
self.b = numpy.random.randn(out_size)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Linear(functions.Learn):
def implement(self, a, p):
x = a[0]
w, b0 = p
return w.dot(x) + b0
def update(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])
u_b0 = b0 - self.eps * (i - b)
return u_w, u_b0
def request(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
return x - w.T.dot(i - b),
class LinearNode(functions.Node):
def __init__(self, in_size, out_size, eps):
super().__init__(Linear(eps))
self.param_name = ['w', 'b']
self.w = numpy.random.randn(out_size, in_size)
self.b = numpy.random.randn(out_size)
<|reserved_special_token_1|>
import numpy
from nn_functor import functions
class Linear(functions.Learn):
def implement(self, a, p):
x = a[0]
w, b0 = p
return w.dot(x) + b0
def update(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])
u_b0 = b0 - self.eps * (i - b)
return u_w, u_b0
def request(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
return x - w.T.dot(i - b),
class LinearNode(functions.Node):
def __init__(self, in_size, out_size, eps):
super().__init__(Linear(eps))
self.param_name = ['w', 'b']
self.w = numpy.random.randn(out_size, in_size)
self.b = numpy.random.randn(out_size)
<|reserved_special_token_1|>
import numpy
from nn_functor import functions
class Linear(functions.Learn):
def implement(self, a, p):
x = a[0]
w, b0 = p
return w.dot(x) + b0
def update(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])
u_b0 = b0 - self.eps * (i - b)
return u_w, u_b0
def request(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
return x - w.T.dot(i - b),
class LinearNode(functions.Node):
def __init__(self, in_size, out_size, eps):
super().__init__(Linear(eps))
self.param_name = [
"w", "b"
]
self.w = numpy.random.randn(out_size, in_size)
self.b = numpy.random.randn(out_size)
|
flexible
|
{
"blob_id": "ec9a152e39a0c51319e4db58eea4496cff5b2fd6",
"index": 3427,
"step-1": "<mask token>\n\n\nclass Linear(functions.Learn):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-2": "<mask token>\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n return w.dot(x) + b0\n <mask token>\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-3": "<mask token>\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n return w.dot(x) + b0\n\n def update(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])\n u_b0 = b0 - self.eps * (i - b)\n return u_w, u_b0\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-4": "import numpy\nfrom nn_functor import functions\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n return w.dot(x) + b0\n\n def update(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])\n u_b0 = b0 - self.eps * (i - b)\n return u_w, u_b0\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-5": "import numpy\n\nfrom nn_functor import functions\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n\n return w.dot(x) + b0\n\n def update(self, a, b, p):\n i = self.implement(a, p)\n\n x = a[0]\n w, b0 = p\n\n u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])\n u_b0 = b0 - self.eps * (i - b)\n return u_w, u_b0\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n\n x = a[0]\n w, b0 = p\n\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n\n self.param_name = [\n \"w\", \"b\"\n ]\n\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from __future__ import print_function
import os, sys, time
import fitz
import PySimpleGUI as sg
"""
PyMuPDF utility
----------------
For a given entry in a page's getImagleList() list, function "recoverpix"
returns either the raw image data, or a modified pixmap if an /SMask entry
exists.
The item's first two entries are PDF xref numbers. The first one is the image in
question, the second one may be 0 or the object id of a soft-image mask. In this
case, we assume it being a sequence of alpha bytes belonging to our image.
We then create a new Pixmap giving it these alpha values, and return it.
If the result pixmap is CMYK, it will be converted to RGB first.
"""
print(fitz.__doc__)
if not tuple(map(int, fitz.version[0].split("."))) >= (1, 13, 17):
raise SystemExit("require PyMuPDF v1.13.17+")
dimlimit = 100 # each image side must be greater than this
relsize = 0.05 # image : pixmap size ratio must be larger than this (5%)
abssize = 2048 # absolute image size limit 2 KB: ignore if smaller
imgdir = "images" # found images are stored in this subfolder
if not os.path.exists(imgdir):
os.mkdir(imgdir)
def recoverpix(doc, item):
x = item[0] # xref of PDF image
s = item[1] # xref of its /SMask
if s == 0: # no smask: use direct image output
return doc.extractImage(x)
def getimage(pix):
if pix.colorspace.n != 4:
return pix
tpix = fitz.Pixmap(fitz.csRGB, pix)
return tpix
# we need to reconstruct the alpha channel with the smask
pix1 = fitz.Pixmap(doc, x)
pix2 = fitz.Pixmap(doc, s) # create pixmap of the /SMask entry
# sanity check
if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and pix2.n == 1):
pix2 = None
return getimage(pix1)
pix = fitz.Pixmap(pix1) # copy of pix1, alpha channel added
pix.setAlpha(pix2.samples) # treat pix2.samples as alpha value
pix1 = pix2 = None # free temp pixmaps
# we may need to adjust something for CMYK pixmaps here:
return getimage(pix)
fname = sys.argv[1] if len(sys.argv) == 2 else None
if not fname:
fname = sg.PopupGetFile("Select file:", title="PyMuPDF PDF Image Extraction")
if not fname:
raise SystemExit()
t0 = time.time()
doc = fitz.open(fname)
page_count = len(doc) # number of pages
xreflist = []
imglist = []
for pno in range(page_count):
sg.QuickMeter(
"Extract Images", # show our progress
pno + 1,
page_count,
"*** Scanning Pages ***",
)
il = doc.getPageImageList(pno)
imglist.extend([x[0] for x in il])
for img in il:
xref = img[0]
if xref in xreflist:
continue
width = img[2]
height = img[3]
if min(width, height) <= dimlimit:
continue
pix = recoverpix(doc, img)
if type(pix) is dict: # we got a raw image
ext = pix["ext"]
imgdata = pix["image"]
n = pix["colorspace"]
imgfile = os.path.join(imgdir, "img-%i.%s" % (xref, ext))
else: # we got a pixmap
imgfile = os.path.join(imgdir, "img-%i.png" % xref)
n = pix.n
imgdata = pix.getPNGData()
if len(imgdata) <= abssize:
continue
if len(imgdata) / (width * height * n) <= relsize:
continue
fout = open(imgfile, "wb")
fout.write(imgdata)
fout.close()
xreflist.append(xref)
t1 = time.time()
imglist = list(set(imglist))
print(len(set(imglist)), "images in total")
print(len(xreflist), "images extracted")
print("total time %g sec" % (t1 - t0))
|
normal
|
{
"blob_id": "856afd30a2ed01a1d44bbe91a7b69998e9a51bb7",
"index": 3170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(fitz.__doc__)\nif not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):\n raise SystemExit('require PyMuPDF v1.13.17+')\ndimlimit = 100\nrelsize = 0.05\nabssize = 2048\nimgdir = 'images'\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile('Select file:', title=\n 'PyMuPDF PDF Image Extraction')\nif not fname:\n raise SystemExit()\nt0 = time.time()\ndoc = fitz.open(fname)\npage_count = len(doc)\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter('Extract Images', pno + 1, page_count,\n '*** Scanning Pages ***')\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict:\n ext = pix['ext']\n imgdata = pix['image']\n n = pix['colorspace']\n imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))\n else:\n imgfile = os.path.join(imgdir, 'img-%i.png' % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n if len(imgdata) <= abssize:\n continue\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n fout = open(imgfile, 'wb')\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), 'images in total')\nprint(len(xreflist), 
'images extracted')\nprint('total time %g sec' % (t1 - t0))\n",
"step-4": "from __future__ import print_function\nimport os, sys, time\nimport fitz\nimport PySimpleGUI as sg\n<mask token>\nprint(fitz.__doc__)\nif not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):\n raise SystemExit('require PyMuPDF v1.13.17+')\ndimlimit = 100\nrelsize = 0.05\nabssize = 2048\nimgdir = 'images'\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile('Select file:', title=\n 'PyMuPDF PDF Image Extraction')\nif not fname:\n raise SystemExit()\nt0 = time.time()\ndoc = fitz.open(fname)\npage_count = len(doc)\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter('Extract Images', pno + 1, page_count,\n '*** Scanning Pages ***')\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict:\n ext = pix['ext']\n imgdata = pix['image']\n n = pix['colorspace']\n imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))\n else:\n imgfile = os.path.join(imgdir, 'img-%i.png' % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n if len(imgdata) <= abssize:\n continue\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n fout = open(imgfile, 'wb')\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\nt1 = 
time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), 'images in total')\nprint(len(xreflist), 'images extracted')\nprint('total time %g sec' % (t1 - t0))\n",
"step-5": "from __future__ import print_function\nimport os, sys, time\nimport fitz\nimport PySimpleGUI as sg\n\n\"\"\"\nPyMuPDF utility\n----------------\nFor a given entry in a page's getImagleList() list, function \"recoverpix\"\nreturns either the raw image data, or a modified pixmap if an /SMask entry\nexists.\nThe item's first two entries are PDF xref numbers. The first one is the image in\nquestion, the second one may be 0 or the object id of a soft-image mask. In this\ncase, we assume it being a sequence of alpha bytes belonging to our image.\nWe then create a new Pixmap giving it these alpha values, and return it.\nIf the result pixmap is CMYK, it will be converted to RGB first.\n\"\"\"\nprint(fitz.__doc__)\n\nif not tuple(map(int, fitz.version[0].split(\".\"))) >= (1, 13, 17):\n raise SystemExit(\"require PyMuPDF v1.13.17+\")\n\ndimlimit = 100 # each image side must be greater than this\nrelsize = 0.05 # image : pixmap size ratio must be larger than this (5%)\nabssize = 2048 # absolute image size limit 2 KB: ignore if smaller\nimgdir = \"images\" # found images are stored in this subfolder\n\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0] # xref of PDF image\n s = item[1] # xref of its /SMask\n if s == 0: # no smask: use direct image output\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n\n # we need to reconstruct the alpha channel with the smask\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s) # create pixmap of the /SMask entry\n\n # sanity check\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n\n pix = fitz.Pixmap(pix1) # copy of pix1, alpha channel added\n pix.setAlpha(pix2.samples) # treat pix2.samples as alpha value\n pix1 = pix2 = None # free temp pixmaps\n\n # we may need to adjust something for CMYK pixmaps here:\n 
return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile(\"Select file:\", title=\"PyMuPDF PDF Image Extraction\")\nif not fname:\n raise SystemExit()\n\nt0 = time.time()\ndoc = fitz.open(fname)\n\npage_count = len(doc) # number of pages\n\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter(\n \"Extract Images\", # show our progress\n pno + 1,\n page_count,\n \"*** Scanning Pages ***\",\n )\n\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict: # we got a raw image\n ext = pix[\"ext\"]\n imgdata = pix[\"image\"]\n n = pix[\"colorspace\"]\n imgfile = os.path.join(imgdir, \"img-%i.%s\" % (xref, ext))\n else: # we got a pixmap\n imgfile = os.path.join(imgdir, \"img-%i.png\" % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n\n if len(imgdata) <= abssize:\n continue\n\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n\n fout = open(imgfile, \"wb\")\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\n\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), \"images in total\")\nprint(len(xreflist), \"images extracted\")\nprint(\"total time %g sec\" % (t1 - t0))",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
import itertools
from typing import Tuple, List, Dict, Optional, Hashable, Collection
class Hypergraph:
"""
Represents a hypergraph, consisting of nodes, directed edges,
hypernodes (each of which is a set of nodes) and hyperedges (directed edges
from hypernodes to hypernodes). Contains functionality to extract motifs
from hypergraphs (Fig 2 of
http://www.cs.cornell.edu/~cristian/Patterns_of_participant_interactions.html)
"""
def __init__(self):
# public
self.nodes = dict()
self.hypernodes = dict()
# private
self.adj_out = dict() # out edges for each (hyper)node
self.adj_in = dict() # in edges for each (hyper)node
def add_node(self, u: Hashable, info: Optional[Dict]=None) -> None:
self.nodes[u] = info if info is not None else dict()
self.adj_out[u] = dict()
self.adj_in[u] = dict()
def add_hypernode(self, name: Hashable,
nodes: Collection[Hashable],
info: Optional[dict]=None) -> None:
self.hypernodes[name] = set(nodes)
self.adj_out[name] = dict()
self.adj_in[name] = dict()
# edge or hyperedge
def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None) -> None:
assert u in self.nodes or u in self.hypernodes
assert v in self.nodes or v in self.hypernodes
if u in self.hypernodes and v in self.hypernodes:
assert len(info.keys()) > 0
if v not in self.adj_out[u]:
self.adj_out[u][v] = []
if u not in self.adj_in[v]:
self.adj_in[v][u] = []
if info is None: info = dict()
self.adj_out[u][v].append(info)
self.adj_in[v][u].append(info)
def edges(self) -> Dict[Tuple[Hashable, Hashable], List]:
return dict(((u, v), lst) for u, d in self.adj_out.items()
for v, lst in d.items())
def outgoing_nodes(self, u: Hashable) -> Dict[Hashable, List]:
assert u in self.adj_out
return dict((v, lst) for v, lst in self.adj_out[u].items()
if v in self.nodes)
def outgoing_hypernodes(self, u) -> Dict[Hashable, List]:
assert u in self.adj_out
return dict((v, lst) for v, lst in self.adj_out[u].items()
if v in self.hypernodes)
def incoming_nodes(self, v: Hashable) -> Dict[Hashable, List]:
assert v in self.adj_in
return dict((u, lst) for u, lst in self.adj_in[v].items() if u in
self.nodes)
def incoming_hypernodes(self, v: Hashable) -> Dict[Hashable, List]:
assert v in self.adj_in
return dict((u, lst) for u, lst in self.adj_in[v].items() if u in
self.hypernodes)
def outdegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:
return [sum([len(l) for v, l in self.adj_out[u].items() if v in
(self.hypernodes if to_hyper else self.nodes)]) for u in
(self.hypernodes if from_hyper else self.nodes)]
def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:
return [sum([len(l) for u, l in self.adj_in[v].items() if u in
(self.hypernodes if from_hyper else self.nodes)]) for v in
(self.hypernodes if to_hyper else self.nodes)]
def reciprocity_motifs(self) -> List[Tuple]:
"""
:return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper
"""
motifs = []
for C1, c1_nodes in self.hypernodes.items():
for c1 in c1_nodes:
motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if
c2 in self.nodes and c2 in self.adj_out[C1]
for e1 in self.adj_out[C1][c2]
for e2 in self.adj_out[c2][c1]]
return motifs
def external_reciprocity_motifs(self) -> List[Tuple]:
"""
:return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper
"""
motifs = []
for C3 in self.hypernodes:
for c2 in self.adj_out[C3]:
if c2 in self.nodes:
motifs += [(C3, c2, c1, e1, e2) for c1 in
set(self.adj_out[c2].keys()) - self.hypernodes[C3]
if c1 in self.nodes
for e1 in self.adj_out[C3][c2]
for e2 in self.adj_out[c2][c1]]
return motifs
def dyadic_interaction_motifs(self) -> List[Tuple]:
"""
:return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper
"""
motifs = []
for C1 in self.hypernodes:
motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in
self.hypernodes and C1 in self.adj_out[C2]
for e1 in self.adj_out[C1][C2]
for e2 in self.adj_out[C2][C1]]
return motifs
def incoming_triad_motifs(self) -> List[Tuple]:
"""
:return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper
"""
motifs = []
for C1 in self.hypernodes:
incoming = list(self.adj_in[C1].keys())
motifs += [(C1, C2, C3, e1, e2) for C2, C3 in
itertools.combinations(incoming, 2)
for e1 in self.adj_out[C2][C1]
for e2 in self.adj_out[C3][C1]]
return motifs
def outgoing_triad_motifs(self) -> List[Tuple]:
    """
    :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper
    """
    found = []
    for center in self.hypernodes:
        # Every unordered pair of distinct targets reached from the center.
        targets = list(self.adj_out[center].keys())
        for left, right in itertools.combinations(targets, 2):
            for edge_left in self.adj_out[center][left]:
                for edge_right in self.adj_out[center][right]:
                    found.append((center, left, right, edge_left, edge_right))
    return found
|
normal
|
{
"blob_id": "4a3611ecd70d80575f9f68bf45d67532a17b9c93",
"index": 7527,
"step-1": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n <mask token>\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n <mask token>\n <mask token>\n <mask token>\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n <mask token>\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) ->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n <mask token>\n <mask token>\n <mask token>\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n",
"step-2": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n <mask token>\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None\n ) ->None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None:\n info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n <mask token>\n <mask token>\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) ->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in 
self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n <mask token>\n <mask token>\n <mask token>\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n",
"step-3": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n <mask token>\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None\n ) ->None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None:\n info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n\n def edges(self) ->Dict[Tuple[Hashable, Hashable], List]:\n return dict(((u, v), lst) for u, d in self.adj_out.items() for v,\n lst in d.items())\n\n def outgoing_nodes(self, u: Hashable) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.nodes)\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) ->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if 
to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def external_reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C3 in self.hypernodes:\n for c2 in self.adj_out[C3]:\n if c2 in self.nodes:\n motifs += [(C3, c2, c1, e1, e2) for c1 in set(self.\n adj_out[c2].keys()) - self.hypernodes[C3] if c1 in\n self.nodes for e1 in self.adj_out[C3][c2] for e2 in\n self.adj_out[c2][c1]]\n return motifs\n\n def dyadic_interaction_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in\n self.hypernodes and C1 in self.adj_out[C2] for e1 in self.\n adj_out[C1][C2] for e2 in self.adj_out[C2][C1]]\n return motifs\n\n def incoming_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n incoming = list(self.adj_in[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(incoming, 2) for e1 in self.adj_out[C2][C1] for\n e2 in self.adj_out[C3][C1]]\n return motifs\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in 
self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n",
"step-4": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n\n def add_node(self, u: Hashable, info: Optional[Dict]=None) ->None:\n self.nodes[u] = info if info is not None else dict()\n self.adj_out[u] = dict()\n self.adj_in[u] = dict()\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None\n ) ->None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None:\n info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n\n def edges(self) ->Dict[Tuple[Hashable, Hashable], List]:\n return dict(((u, v), lst) for u, d in self.adj_out.items() for v,\n lst in d.items())\n\n def outgoing_nodes(self, u: Hashable) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.nodes)\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) 
->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def external_reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C3 in self.hypernodes:\n for c2 in self.adj_out[C3]:\n if c2 in self.nodes:\n motifs += [(C3, c2, c1, e1, e2) for c1 in set(self.\n adj_out[c2].keys()) - self.hypernodes[C3] if c1 in\n self.nodes for e1 in self.adj_out[C3][c2] for e2 in\n self.adj_out[c2][c1]]\n return motifs\n\n def dyadic_interaction_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in\n self.hypernodes and C1 in self.adj_out[C2] for e1 in self.\n adj_out[C1][C2] for e2 in self.adj_out[C2][C1]]\n return motifs\n\n def incoming_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n incoming = list(self.adj_in[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(incoming, 2) for e1 in self.adj_out[C2][C1] for\n e2 in self.adj_out[C3][C1]]\n return motifs\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 
in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n",
"step-5": "import itertools\nfrom typing import Tuple, List, Dict, Optional, Hashable, Collection\n\nclass Hypergraph:\n \"\"\"\n Represents a hypergraph, consisting of nodes, directed edges,\n hypernodes (each of which is a set of nodes) and hyperedges (directed edges\n from hypernodes to hypernodes). Contains functionality to extract motifs\n from hypergraphs (Fig 2 of\n http://www.cs.cornell.edu/~cristian/Patterns_of_participant_interactions.html)\n \"\"\"\n def __init__(self):\n # public\n self.nodes = dict()\n self.hypernodes = dict()\n\n # private\n self.adj_out = dict() # out edges for each (hyper)node\n self.adj_in = dict() # in edges for each (hyper)node\n\n def add_node(self, u: Hashable, info: Optional[Dict]=None) -> None:\n self.nodes[u] = info if info is not None else dict()\n self.adj_out[u] = dict()\n self.adj_in[u] = dict()\n\n def add_hypernode(self, name: Hashable,\n nodes: Collection[Hashable],\n info: Optional[dict]=None) -> None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n # edge or hyperedge\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None) -> None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None: info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n\n def edges(self) -> Dict[Tuple[Hashable, Hashable], List]:\n return dict(((u, v), lst) for u, d in self.adj_out.items()\n for v, lst in d.items())\n\n def outgoing_nodes(self, u: Hashable) -> Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items()\n if v in self.nodes)\n\n def outgoing_hypernodes(self, u) -> Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) 
for v, lst in self.adj_out[u].items()\n if v in self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) -> Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) -> Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n\n def outdegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:\n return [sum([len(l) for v, l in self.adj_out[u].items() if v in\n (self.hypernodes if to_hyper else self.nodes)]) for u in\n (self.hypernodes if from_hyper else self.nodes)]\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in\n (self.hypernodes if from_hyper else self.nodes)]) for v in\n (self.hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1]\n for e1 in self.adj_out[C1][c2]\n for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def external_reciprocity_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C3 in self.hypernodes:\n for c2 in self.adj_out[C3]:\n if c2 in self.nodes:\n motifs += [(C3, c2, c1, e1, e2) for c1 in\n set(self.adj_out[c2].keys()) - self.hypernodes[C3]\n if c1 in self.nodes\n for e1 in self.adj_out[C3][c2]\n for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def dyadic_interaction_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper\n \"\"\"\n\n motifs = []\n for C1 in 
self.hypernodes:\n motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in\n self.hypernodes and C1 in self.adj_out[C2]\n for e1 in self.adj_out[C1][C2]\n for e2 in self.adj_out[C2][C1]]\n return motifs\n\n def incoming_triad_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n incoming = list(self.adj_in[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in\n itertools.combinations(incoming, 2)\n for e1 in self.adj_out[C2][C1]\n for e2 in self.adj_out[C3][C1]]\n return motifs\n\n def outgoing_triad_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in\n itertools.combinations(outgoing, 2)\n for e1 in self.adj_out[C1][C2]\n for e2 in self.adj_out[C1][C3]]\n return motifs\n",
"step-ids": [
8,
10,
15,
16,
20
]
}
|
[
8,
10,
15,
16,
20
] |
# My solution: copy the FASTA file line by line.
# Open the source for reading FIRST: the original opened the destination
# first, so a missing source file still left behind an empty (truncated)
# destination.
with open("sequence.protein.fasta", "r") as fr:
    with open("sequence.protein.2.fasta", "w") as fw:
        for line in fr:
            fw.write(line)

# Instructor's solution (kept for reference; note the stray trailing colon):
# fr = open('sequence.protein.fasta','r'):
# lines=fr.readlines()
# seq_list=list()
# for line in lines:
|
normal
|
{
"blob_id": "84fb0e364ee3cd846148abfc9326f404f008c510",
"index": 7908,
"step-1": "<mask token>\n",
"step-2": "with open('sequence.protein.2.fasta', 'w') as fw:\n with open('sequence.protein.fasta', 'r') as fr:\n for line in fr:\n fw.write(line)\n",
"step-3": "# 내 풀이\nwith open(\"sequence.protein.2.fasta\", \"w\") as fw:\n with open(\"sequence.protein.fasta\", \"r\") as fr:\n for line in fr:\n fw.write(line)\n\n# 강사님 풀이\n# fr = open('sequence.protein.fasta','r'):\n# lines=fr.readlines()\n# seq_list=list()\n# for line in lines:\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Copyright 2016 Huawei, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from openstackclient.compute.v2 import console
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import utils
class TestConsole(compute_fakes.TestComputev2):
    """Shared fixture: wires a mocked SDK compute client into the app."""

    def setUp(self):
        super().setUp()
        # Replace the SDK connection with a mock and expose its compute
        # proxy so subclasses can configure per-test return values.
        sdk_conn = mock.Mock()
        sdk_conn.compute = mock.Mock()
        self.app.client_manager.sdk_connection = sdk_conn
        self.sdk_client = sdk_conn.compute
        self.sdk_client.find_server = mock.Mock()
        self.sdk_client.get_server_console_output = mock.Mock()
class TestConsoleLog(TestConsole):
    """Tests for the ``openstack console log show`` command."""

    _server = compute_fakes.create_one_server()

    def setUp(self):
        super().setUp()
        self.sdk_client.find_server.return_value = self._server
        self.cmd = console.ShowConsoleLog(self.app, None)

    def test_show_no_args(self):
        # The server argument is mandatory, so an empty argument list
        # must fail during parsing.
        self.assertRaises(
            utils.ParserException,
            self.check_parser,
            self.cmd,
            [],
            [],
        )

    def test_show(self):
        fake_output = {'output': '1st line\n2nd line\n'}
        self.sdk_client.get_server_console_output.return_value = fake_output
        parsed_args = self.check_parser(
            self.cmd, ['fake_server'], [('server', 'fake_server')]
        )

        self.cmd.take_action(parsed_args)

        self.sdk_client.find_server.assert_called_with(
            name_or_id='fake_server', ignore_missing=False
        )
        # No --lines option: the full log is requested.
        self.sdk_client.get_server_console_output.assert_called_with(
            self._server.id, length=None
        )
        self.assertEqual(self.app.stdout.content[0], fake_output['output'])

    def test_show_lines(self):
        fake_output = {'output': '1st line\n2nd line'}
        self.sdk_client.get_server_console_output.return_value = fake_output
        parsed_args = self.check_parser(
            self.cmd,
            ['fake_server', '--lines', '15'],
            [('server', 'fake_server'), ('lines', 15)],
        )

        self.cmd.take_action(parsed_args)

        self.sdk_client.find_server.assert_called_with(
            name_or_id='fake_server', ignore_missing=False
        )
        # --lines is forwarded to the SDK as the length parameter.
        self.sdk_client.get_server_console_output.assert_called_with(
            self._server.id, length=15
        )
class TestConsoleUrlShow(TestConsole):
_server = compute_fakes.create_one_server()
def setUp(self):
super(TestConsoleUrlShow, self).setUp()
self.sdk_client.find_server.return_value = self._server
fake_console_data = {
'url': 'http://localhost',
'protocol': 'fake_protocol',
'type': 'fake_type',
}
self.sdk_client.create_console = mock.Mock(
return_value=fake_console_data
)
self.columns = (
'protocol',
'type',
'url',
)
self.data = (
fake_console_data['protocol'],
fake_console_data['type'],
fake_console_data['url'],
)
self.cmd = console.ShowConsoleURL(self.app, None)
def test_console_url_show_by_default(self):
arglist = [
'foo_vm',
]
verifylist = [
('url_type', 'novnc'),
('server', 'foo_vm'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.create_console.assert_called_once_with(
self._server.id, console_type='novnc'
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_console_url_show_with_novnc(self):
arglist = [
'--novnc',
'foo_vm',
]
verifylist = [
('url_type', 'novnc'),
('server', 'foo_vm'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.create_console.assert_called_once_with(
self._server.id, console_type='novnc'
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_console_url_show_with_xvpvnc(self):
arglist = [
'--xvpvnc',
'foo_vm',
]
verifylist = [
('url_type', 'xvpvnc'),
('server', 'foo_vm'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.create_console.assert_called_once_with(
self._server.id, console_type='xvpvnc'
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_console_url_show_with_spice(self):
arglist = [
'--spice',
'foo_vm',
]
verifylist = [
('url_type', 'spice-html5'),
('server', 'foo_vm'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.create_console.assert_called_once_with(
self._server.id, console_type='spice-html5'
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_console_url_show_with_rdp(self):
arglist = [
'--rdp',
'foo_vm',
]
verifylist = [
('url_type', 'rdp-html5'),
('server', 'foo_vm'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.create_console.assert_called_once_with(
self._server.id, console_type='rdp-html5'
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_console_url_show_with_serial(self):
arglist = [
'--serial',
'foo_vm',
]
verifylist = [
('url_type', 'serial'),
('server', 'foo_vm'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.create_console.assert_called_once_with(
self._server.id, console_type='serial'
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_console_url_show_with_mks(self):
    """Passing --mks requests a 'webmks' console URL from the SDK."""
    arglist = ['--mks', 'foo_vm']
    verifylist = [('url_type', 'webmks'), ('server', 'foo_vm')]
    parsed = self.check_parser(self.cmd, arglist, verifylist)

    cols, rows = self.cmd.take_action(parsed)

    self.sdk_client.create_console.assert_called_once_with(
        self._server.id, console_type='webmks')
    self.assertEqual(self.columns, cols)
    self.assertEqual(self.data, rows)
|
normal
|
{
"blob_id": "cc9485dea0975a0974f037b129816a9359b2b622",
"index": 2875,
"step-1": "<mask token>\n\n\nclass TestConsoleUrlShow(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleUrlShow, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n fake_console_data = {'url': 'http://localhost', 'protocol':\n 'fake_protocol', 'type': 'fake_type'}\n self.sdk_client.create_console = mock.Mock(return_value=\n fake_console_data)\n self.columns = 'protocol', 'type', 'url'\n self.data = fake_console_data['protocol'], fake_console_data['type'\n ], fake_console_data['url']\n self.cmd = console.ShowConsoleURL(self.app, None)\n\n def test_console_url_show_by_default(self):\n arglist = ['foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_novnc(self):\n arglist = ['--novnc', 'foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_xvpvnc(self):\n arglist = ['--xvpvnc', 'foo_vm']\n verifylist = [('url_type', 'xvpvnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='xvpvnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_spice(self):\n arglist = ['--spice', 'foo_vm']\n 
verifylist = [('url_type', 'spice-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='spice-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_rdp(self):\n arglist = ['--rdp', 'foo_vm']\n verifylist = [('url_type', 'rdp-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='rdp-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_serial(self):\n arglist = ['--serial', 'foo_vm']\n verifylist = [('url_type', 'serial'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='serial')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_mks(self):\n arglist = ['--mks', 'foo_vm']\n verifylist = [('url_type', 'webmks'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='webmks')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n",
"step-2": "<mask token>\n\n\nclass TestConsoleLog(TestConsole):\n <mask token>\n\n def setUp(self):\n super(TestConsoleLog, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n self.cmd = console.ShowConsoleLog(self.app, None)\n\n def test_show_no_args(self):\n arglist = []\n verifylist = []\n self.assertRaises(utils.ParserException, self.check_parser, self.\n cmd, arglist, verifylist)\n\n def test_show(self):\n arglist = ['fake_server']\n verifylist = [('server', 'fake_server')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n output = {'output': '1st line\\n2nd line\\n'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n self.sdk_client.find_server.assert_called_with(name_or_id=\n 'fake_server', ignore_missing=False)\n self.sdk_client.get_server_console_output.assert_called_with(self.\n _server.id, length=None)\n stdout = self.app.stdout.content\n self.assertEqual(stdout[0], output['output'])\n\n def test_show_lines(self):\n arglist = ['fake_server', '--lines', '15']\n verifylist = [('server', 'fake_server'), ('lines', 15)]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n output = {'output': '1st line\\n2nd line'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n self.sdk_client.find_server.assert_called_with(name_or_id=\n 'fake_server', ignore_missing=False)\n self.sdk_client.get_server_console_output.assert_called_with(self.\n _server.id, length=15)\n\n\nclass TestConsoleUrlShow(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleUrlShow, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n fake_console_data = {'url': 'http://localhost', 'protocol':\n 'fake_protocol', 'type': 'fake_type'}\n self.sdk_client.create_console = mock.Mock(return_value=\n fake_console_data)\n self.columns = 'protocol', 'type', 'url'\n self.data = 
fake_console_data['protocol'], fake_console_data['type'\n ], fake_console_data['url']\n self.cmd = console.ShowConsoleURL(self.app, None)\n\n def test_console_url_show_by_default(self):\n arglist = ['foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_novnc(self):\n arglist = ['--novnc', 'foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_xvpvnc(self):\n arglist = ['--xvpvnc', 'foo_vm']\n verifylist = [('url_type', 'xvpvnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='xvpvnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_spice(self):\n arglist = ['--spice', 'foo_vm']\n verifylist = [('url_type', 'spice-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='spice-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_rdp(self):\n arglist = ['--rdp', 'foo_vm']\n verifylist = [('url_type', 
'rdp-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='rdp-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_serial(self):\n arglist = ['--serial', 'foo_vm']\n verifylist = [('url_type', 'serial'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='serial')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_mks(self):\n arglist = ['--mks', 'foo_vm']\n verifylist = [('url_type', 'webmks'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='webmks')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n",
"step-3": "<mask token>\n\n\nclass TestConsole(compute_fakes.TestComputev2):\n\n def setUp(self):\n super(TestConsole, self).setUp()\n self.app.client_manager.sdk_connection = mock.Mock()\n self.app.client_manager.sdk_connection.compute = mock.Mock()\n self.sdk_client = self.app.client_manager.sdk_connection.compute\n self.sdk_client.find_server = mock.Mock()\n self.sdk_client.get_server_console_output = mock.Mock()\n\n\nclass TestConsoleLog(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleLog, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n self.cmd = console.ShowConsoleLog(self.app, None)\n\n def test_show_no_args(self):\n arglist = []\n verifylist = []\n self.assertRaises(utils.ParserException, self.check_parser, self.\n cmd, arglist, verifylist)\n\n def test_show(self):\n arglist = ['fake_server']\n verifylist = [('server', 'fake_server')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n output = {'output': '1st line\\n2nd line\\n'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n self.sdk_client.find_server.assert_called_with(name_or_id=\n 'fake_server', ignore_missing=False)\n self.sdk_client.get_server_console_output.assert_called_with(self.\n _server.id, length=None)\n stdout = self.app.stdout.content\n self.assertEqual(stdout[0], output['output'])\n\n def test_show_lines(self):\n arglist = ['fake_server', '--lines', '15']\n verifylist = [('server', 'fake_server'), ('lines', 15)]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n output = {'output': '1st line\\n2nd line'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n self.sdk_client.find_server.assert_called_with(name_or_id=\n 'fake_server', ignore_missing=False)\n self.sdk_client.get_server_console_output.assert_called_with(self.\n _server.id, length=15)\n\n\nclass 
TestConsoleUrlShow(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleUrlShow, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n fake_console_data = {'url': 'http://localhost', 'protocol':\n 'fake_protocol', 'type': 'fake_type'}\n self.sdk_client.create_console = mock.Mock(return_value=\n fake_console_data)\n self.columns = 'protocol', 'type', 'url'\n self.data = fake_console_data['protocol'], fake_console_data['type'\n ], fake_console_data['url']\n self.cmd = console.ShowConsoleURL(self.app, None)\n\n def test_console_url_show_by_default(self):\n arglist = ['foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_novnc(self):\n arglist = ['--novnc', 'foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_xvpvnc(self):\n arglist = ['--xvpvnc', 'foo_vm']\n verifylist = [('url_type', 'xvpvnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='xvpvnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_spice(self):\n arglist = ['--spice', 'foo_vm']\n verifylist = [('url_type', 
'spice-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='spice-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_rdp(self):\n arglist = ['--rdp', 'foo_vm']\n verifylist = [('url_type', 'rdp-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='rdp-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_serial(self):\n arglist = ['--serial', 'foo_vm']\n verifylist = [('url_type', 'serial'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='serial')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_mks(self):\n arglist = ['--mks', 'foo_vm']\n verifylist = [('url_type', 'webmks'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='webmks')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n",
"step-4": "from unittest import mock\nfrom openstackclient.compute.v2 import console\nfrom openstackclient.tests.unit.compute.v2 import fakes as compute_fakes\nfrom openstackclient.tests.unit import utils\n\n\nclass TestConsole(compute_fakes.TestComputev2):\n\n def setUp(self):\n super(TestConsole, self).setUp()\n self.app.client_manager.sdk_connection = mock.Mock()\n self.app.client_manager.sdk_connection.compute = mock.Mock()\n self.sdk_client = self.app.client_manager.sdk_connection.compute\n self.sdk_client.find_server = mock.Mock()\n self.sdk_client.get_server_console_output = mock.Mock()\n\n\nclass TestConsoleLog(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleLog, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n self.cmd = console.ShowConsoleLog(self.app, None)\n\n def test_show_no_args(self):\n arglist = []\n verifylist = []\n self.assertRaises(utils.ParserException, self.check_parser, self.\n cmd, arglist, verifylist)\n\n def test_show(self):\n arglist = ['fake_server']\n verifylist = [('server', 'fake_server')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n output = {'output': '1st line\\n2nd line\\n'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n self.sdk_client.find_server.assert_called_with(name_or_id=\n 'fake_server', ignore_missing=False)\n self.sdk_client.get_server_console_output.assert_called_with(self.\n _server.id, length=None)\n stdout = self.app.stdout.content\n self.assertEqual(stdout[0], output['output'])\n\n def test_show_lines(self):\n arglist = ['fake_server', '--lines', '15']\n verifylist = [('server', 'fake_server'), ('lines', 15)]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n output = {'output': '1st line\\n2nd line'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n 
self.sdk_client.find_server.assert_called_with(name_or_id=\n 'fake_server', ignore_missing=False)\n self.sdk_client.get_server_console_output.assert_called_with(self.\n _server.id, length=15)\n\n\nclass TestConsoleUrlShow(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleUrlShow, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n fake_console_data = {'url': 'http://localhost', 'protocol':\n 'fake_protocol', 'type': 'fake_type'}\n self.sdk_client.create_console = mock.Mock(return_value=\n fake_console_data)\n self.columns = 'protocol', 'type', 'url'\n self.data = fake_console_data['protocol'], fake_console_data['type'\n ], fake_console_data['url']\n self.cmd = console.ShowConsoleURL(self.app, None)\n\n def test_console_url_show_by_default(self):\n arglist = ['foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_novnc(self):\n arglist = ['--novnc', 'foo_vm']\n verifylist = [('url_type', 'novnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='novnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_xvpvnc(self):\n arglist = ['--xvpvnc', 'foo_vm']\n verifylist = [('url_type', 'xvpvnc'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, 
console_type='xvpvnc')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_spice(self):\n arglist = ['--spice', 'foo_vm']\n verifylist = [('url_type', 'spice-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='spice-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_rdp(self):\n arglist = ['--rdp', 'foo_vm']\n verifylist = [('url_type', 'rdp-html5'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='rdp-html5')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_serial(self):\n arglist = ['--serial', 'foo_vm']\n verifylist = [('url_type', 'serial'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='serial')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_mks(self):\n arglist = ['--mks', 'foo_vm']\n verifylist = [('url_type', 'webmks'), ('server', 'foo_vm')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(self._server\n .id, console_type='webmks')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n",
"step-5": "# Copyright 2016 Huawei, Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nfrom unittest import mock\n\nfrom openstackclient.compute.v2 import console\nfrom openstackclient.tests.unit.compute.v2 import fakes as compute_fakes\nfrom openstackclient.tests.unit import utils\n\n\nclass TestConsole(compute_fakes.TestComputev2):\n def setUp(self):\n super(TestConsole, self).setUp()\n\n # SDK mock\n self.app.client_manager.sdk_connection = mock.Mock()\n self.app.client_manager.sdk_connection.compute = mock.Mock()\n self.sdk_client = self.app.client_manager.sdk_connection.compute\n self.sdk_client.find_server = mock.Mock()\n self.sdk_client.get_server_console_output = mock.Mock()\n\n\nclass TestConsoleLog(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleLog, self).setUp()\n\n self.sdk_client.find_server.return_value = self._server\n\n self.cmd = console.ShowConsoleLog(self.app, None)\n\n def test_show_no_args(self):\n arglist = []\n verifylist = []\n self.assertRaises(\n utils.ParserException,\n self.check_parser,\n self.cmd,\n arglist,\n verifylist,\n )\n\n def test_show(self):\n arglist = ['fake_server']\n verifylist = [('server', 'fake_server')]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n output = {'output': '1st line\\n2nd line\\n'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n\n 
self.sdk_client.find_server.assert_called_with(\n name_or_id='fake_server', ignore_missing=False\n )\n self.sdk_client.get_server_console_output.assert_called_with(\n self._server.id, length=None\n )\n stdout = self.app.stdout.content\n self.assertEqual(stdout[0], output['output'])\n\n def test_show_lines(self):\n arglist = ['fake_server', '--lines', '15']\n verifylist = [('server', 'fake_server'), ('lines', 15)]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n output = {'output': '1st line\\n2nd line'}\n self.sdk_client.get_server_console_output.return_value = output\n self.cmd.take_action(parsed_args)\n\n self.sdk_client.find_server.assert_called_with(\n name_or_id='fake_server', ignore_missing=False\n )\n self.sdk_client.get_server_console_output.assert_called_with(\n self._server.id, length=15\n )\n\n\nclass TestConsoleUrlShow(TestConsole):\n _server = compute_fakes.create_one_server()\n\n def setUp(self):\n super(TestConsoleUrlShow, self).setUp()\n self.sdk_client.find_server.return_value = self._server\n fake_console_data = {\n 'url': 'http://localhost',\n 'protocol': 'fake_protocol',\n 'type': 'fake_type',\n }\n self.sdk_client.create_console = mock.Mock(\n return_value=fake_console_data\n )\n\n self.columns = (\n 'protocol',\n 'type',\n 'url',\n )\n self.data = (\n fake_console_data['protocol'],\n fake_console_data['type'],\n fake_console_data['url'],\n )\n\n self.cmd = console.ShowConsoleURL(self.app, None)\n\n def test_console_url_show_by_default(self):\n arglist = [\n 'foo_vm',\n ]\n verifylist = [\n ('url_type', 'novnc'),\n ('server', 'foo_vm'),\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(\n self._server.id, console_type='novnc'\n )\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_novnc(self):\n arglist = [\n '--novnc',\n 'foo_vm',\n ]\n 
verifylist = [\n ('url_type', 'novnc'),\n ('server', 'foo_vm'),\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(\n self._server.id, console_type='novnc'\n )\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_xvpvnc(self):\n arglist = [\n '--xvpvnc',\n 'foo_vm',\n ]\n verifylist = [\n ('url_type', 'xvpvnc'),\n ('server', 'foo_vm'),\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(\n self._server.id, console_type='xvpvnc'\n )\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_spice(self):\n arglist = [\n '--spice',\n 'foo_vm',\n ]\n verifylist = [\n ('url_type', 'spice-html5'),\n ('server', 'foo_vm'),\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(\n self._server.id, console_type='spice-html5'\n )\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_rdp(self):\n arglist = [\n '--rdp',\n 'foo_vm',\n ]\n verifylist = [\n ('url_type', 'rdp-html5'),\n ('server', 'foo_vm'),\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(\n self._server.id, console_type='rdp-html5'\n )\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_serial(self):\n arglist = [\n '--serial',\n 'foo_vm',\n ]\n verifylist = [\n ('url_type', 'serial'),\n ('server', 'foo_vm'),\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, 
data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(\n self._server.id, console_type='serial'\n )\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n\n def test_console_url_show_with_mks(self):\n arglist = [\n '--mks',\n 'foo_vm',\n ]\n verifylist = [\n ('url_type', 'webmks'),\n ('server', 'foo_vm'),\n ]\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n columns, data = self.cmd.take_action(parsed_args)\n self.sdk_client.create_console.assert_called_once_with(\n self._server.id, console_type='webmks'\n )\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.data, data)\n",
"step-ids": [
10,
15,
18,
19,
20
]
}
|
[
10,
15,
18,
19,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
run_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from run_AtariGame import run_Game
run_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001)
<|reserved_special_token_1|>
# -*- coding: UTF-8 -*-
'''
model = DQN,DDQN,PDQN,PDDQN,DQN_PER,DDQN_PER,DQN_InAday,DQN_PER_Ipm...
'''
# -----------ContolGame------------
# CartPole - v1, MountainCar - v0, Acrobot - v1, Pendulum - v0
# from run_ContolGame import run_Game
# run_Game('DQN', 'CartPole-v1', episodes=400) # model,env,episodes
# -----------AtariGame - ------------
from run_AtariGame import run_Game
run_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001) # model,env,lifes,episodes
|
flexible
|
{
"blob_id": "f49a133fa94aae791ef0f1eec54cf0629f45a0ed",
"index": 5153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrun_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001)\n",
"step-3": "<mask token>\nfrom run_AtariGame import run_Game\nrun_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001)\n",
"step-4": "# -*- coding: UTF-8 -*-\n'''\nmodel = DQN,DDQN,PDQN,PDDQN,DQN_PER,DDQN_PER,DQN_InAday,DQN_PER_Ipm...\n'''\n# -----------ContolGame------------\n# CartPole - v1, MountainCar - v0, Acrobot - v1, Pendulum - v0\n# from run_ContolGame import run_Game\n# run_Game('DQN', 'CartPole-v1', episodes=400) # model,env,episodes\n\n# -----------AtariGame - ------------\nfrom run_AtariGame import run_Game\nrun_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001) # model,env,lifes,episodes\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.