code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
from codar.cheetah import Campaign
from codar.cheetah import parameters as p
from codar.savanna.machines import SummitNode
import copy
def get_shared_node_layout (n_writers, n_readers):
    """Lay out writer and reader ranks together on one shared Summit node.

    Writer ranks are pinned to cores 0..n_writers-1 and reader ranks to the
    n_readers cores immediately following them, so both applications share
    a single node. Returns a single-element node-layout list.
    """
    node = SummitNode()
    for core in range(n_writers + n_readers):
        if core < n_writers:
            node.cpu[core] = "writer:%d" % core
        else:
            # reader rank ids restart at 0 after the writer cores
            node.cpu[core] = "reader:%d" % (core - n_writers)
    return [node]
def get_separate_node_layout (n_writers, n_readers):
    """Lay out writer and reader ranks on two dedicated Summit nodes.

    The first node hosts only writer ranks (cores 0..n_writers-1) and the
    second hosts only reader ranks (cores 0..n_readers-1). Returns the
    two-node layout list [writer_node, reader_node].
    """
    writer_node, reader_node = SummitNode(), SummitNode()
    for core in range(n_writers):
        writer_node.cpu[core] = "writer:%d" % core
    for core in range(n_readers):
        reader_node.cpu[core] = "reader:%d" % core
    return [writer_node, reader_node]
def get_sweeps(ref_params_d, n_writers):
    """Build the list of Cheetah Sweep objects for a given writer count.

    ref_params_d -- nested dict {'writer'|'reader': {param_name: Param...}}
                    used as a template; it is deep-copied, never mutated.
    n_writers    -- total number of writer ranks for this sweep group.

    Returns a list of p.Sweep objects covering the cartesian product of
    reader-ratio x data-size x engine, each with a separate-node layout and
    (where applicable) a shared-node layout variant.
    """
    # Work on a copy so the caller's template dict is left untouched.
    params_d = copy.deepcopy(ref_params_d)
    params_d['writer']['nprocs'].values=[n_writers]
    params_d['writer']['decomposition'].values=[n_writers]
    all_dicts = []
    all_sweeps = []
    # Loop over ratio of the no. of reader ranks
    for r in [8]:
        par_r = copy.deepcopy(params_d)
        par_r['reader']['nprocs'].values = [n_writers//r]
        par_r['reader']['decomposition'].values = [n_writers//r]
        # Loop over data size per process
        for d in ['512MB']:
            par_r_d = copy.deepcopy(par_r)
            par_r_d['writer']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]
            par_r_d['reader']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]
            # Loop over engines
            for e in ["bp4","sst-rdma","sst-tcp","ssc","insitumpi"]:
                par_r_d_e = copy.deepcopy(par_r_d)
                par_r_d_e['writer']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]
                par_r_d_e['reader']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]
                all_dicts.append(par_r_d_e)
    # Turn every fully-specified parameter dict into one or two Sweeps.
    for d in all_dicts:
        sweep_params = []
        sweep_params.extend(list(d['writer'].values()))
        sweep_params.extend(list(d['reader'].values()))
        # Writers and readers always get a separate-node layout (32 cores each).
        sep_node_layout = get_separate_node_layout(32, 32)
        shared_node_layout = None
        # Shared-node layout: pack 32 writers + 4 readers when the rank ratio
        # is 8:1, otherwise fall back to a 16/16 split for small node counts.
        if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0] == 8:
            shared_node_layout = get_shared_node_layout(32,4)
        elif n_writers//32 < 4096:
            shared_node_layout = get_shared_node_layout(16,16)
        rc_dependency = None
        # bp4 is a file-based engine: the reader must start after the writer.
        if 'bp4' in d['writer']['xmlfile'].values[0]:
            rc_dependency = {'reader': 'writer'}
        sweep_sep = p.Sweep(parameters = sweep_params, node_layout = {'summit':sep_node_layout}, rc_dependency=rc_dependency)
        # insitumpi and ssc require both codes in one MPI job (MPMD launch).
        if 'insitumpi' in d['writer']['xmlfile'].values[0]:
            sweep_sep.launch_mode='mpmd'
        if 'ssc' in d['writer']['xmlfile'].values[0]:
            sweep_sep.launch_mode='mpmd'
        sweep_shared = None
        if shared_node_layout:
            sweep_shared = p.Sweep(parameters = sweep_params, node_layout = {'summit':shared_node_layout}, rc_dependency=rc_dependency)
        # NOTE(review): n_writers//32 is the node count, so this guard only
        # drops the separate-node sweep for >=4096 nodes — confirm intended.
        if n_writers//32 < 4096:
            all_sweeps.append(sweep_sep)
        if sweep_shared:
            all_sweeps.append(sweep_shared)
    return all_sweeps
class Adios_iotest(Campaign):
    """Cheetah campaign definition for the ADIOS adios_iotest staging study.

    Runs a writer/reader pair of adios_iotest over several staging engines
    and node layouts; all campaign configuration is declared as class
    attributes, as the Cheetah Campaign base class expects.
    """

    # A name for the campaign
    name = "ADIOS_IOTEST"
    # A list of the codes that will be part of the workflow
    # If there is an adios xml file associated with the codes, list it here
    # (both roles run the same binary; they are distinguished by -a appid)
    codes = [ ("writer", dict(exe="adios_iotest")),
              ("reader", dict(exe="adios_iotest"))
            ]
    # A list of machines that this campaign must be supported on
    supported_machines = ['local', 'theta', 'summit']
    # Option to kill an experiment (just one experiment, not the full sweep or campaign) if one of the codes fails
    kill_on_partial_failure = True
    # Some pre-processing in the experiment directory
    # This is performed when the campaign directory is created (before the campaign is launched)
    run_dir_setup_script = None
    # A post-processing script to be run in the experiment directory after the experiment completes
    # For example, removing some large files after the experiment is done
    run_post_process_script = 'cleanup.sh'
    # umask applied to your directory in the campaign so that colleagues can view files
    umask = '027'
    # Scheduler information: job queue, account-id etc. Leave it to None if running on a local machine
    scheduler_options = {'theta': {'project':'CSC249ADCD01', 'queue': 'batch'},
                         'summit': {'project':'csc303'}}
    # Setup your environment. Loading modules, setting the LD_LIBRARY_PATH etc.
    # Ensure this script is executable
    app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh', 'summit':'env_setup.sh'}
    # Per-experiment input files: one config file per (size, ratio) pair and
    # one ADIOS2 xml per staging engine; get_sweeps() selects among these.
    input_files = [
            'staging-perf-test-16MB-2to1.txt',
            'staging-perf-test-16MB-8to1.txt',
            'staging-perf-test-1MB-2to1.txt',
            'staging-perf-test-1MB-8to1.txt',
            'staging-perf-test-512MB-2to1.txt',
            'staging-perf-test-512MB-8to1.txt',
            'staging-perf-test-bp4.xml',
            'staging-perf-test-insitumpi.xml',
            'staging-perf-test-ssc.xml',
            'staging-perf-test-sst-rdma.xml',
            'staging-perf-test-sst-tcp.xml'
            ]
    # Create the sweep parameters for a sweep.
    # Values left as empty lists here are filled in by get_sweeps().
    params = {}
    params['writer'] = {}
    params['reader'] = {}
    params['writer']['nprocs']          = p.ParamRunner        ('writer', 'nprocs', [])
    params['writer']['appid']           = p.ParamCmdLineOption ('writer', 'appid', '-a', [1])
    params['writer']['configfile']      = p.ParamCmdLineOption ('writer', 'configFile', '-c', [])
    params['writer']['scaling']         = p.ParamCmdLineOption ('writer', 'scaling', '-w', [None])
    params['writer']['xmlfile']         = p.ParamCmdLineOption ('writer', 'xmlfile', '-x', [])
    params['writer']['decomposition']   = p.ParamCmdLineOption ('writer', 'decomposition', '-d', [])
    params['reader']['nprocs']          = p.ParamRunner        ('reader', 'nprocs', [])
    params['reader']['appid']           = p.ParamCmdLineOption ('reader', 'appid', '-a', [2])
    params['reader']['configfile']      = p.ParamCmdLineOption ('reader', 'configFile', '-c', [])
    params['reader']['scaling']         = p.ParamCmdLineOption ('reader', 'scaling', '-w', [None])
    params['reader']['xmlfile']         = p.ParamCmdLineOption ('reader', 'xmlfile', '-x', [])
    params['reader']['decomposition']   = p.ParamCmdLineOption ('reader', 'decomposition', '-d', [])
    # Build one SweepGroup per node count (n nodes -> n*32 writer ranks).
    sweeps = []
    for n in [8]:
        group_sweeps = get_sweeps (params, n*32)
        # pdb.set_trace()
        s_group = p.SweepGroup("{}-nodes".format(n),
                                walltime=7200,
                                per_run_timeout=600,
                                component_inputs={'writer':input_files},
                                #nodes=128,
                                parameter_groups=group_sweeps,)
        sweeps.append(s_group)
|
normal
|
{
"blob_id": "475cc5130e847b1a74a33bfa5cbc202a6bf31621",
"index": 6932,
"step-1": "<mask token>\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 
4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = 
p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-2": "<mask token>\n\n\ndef get_shared_node_layout(n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = 'writer:{}'.format(i)\n for i in range(n_readers):\n nc.cpu[i + n_writers] = 'reader:{}'.format(i)\n return [nc]\n\n\n<mask token>\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in 
d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = 
p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-3": "<mask token>\n\n\ndef get_shared_node_layout(n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = 'writer:{}'.format(i)\n for i in range(n_readers):\n nc.cpu[i + n_writers] = 'reader:{}'.format(i)\n return [nc]\n\n\ndef get_separate_node_layout(n_writers, n_readers):\n nc_w = SummitNode()\n for i in range(n_writers):\n nc_w.cpu[i] = 'writer:{}'.format(i)\n nc_r = SummitNode()\n for i in range(n_readers):\n nc_r.cpu[i] = 'reader:{}'.format(i)\n return [nc_w, nc_r]\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 
'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 
'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-4": "from codar.cheetah import Campaign\nfrom codar.cheetah import parameters as p\nfrom codar.savanna.machines import SummitNode\nimport copy\n\n\ndef get_shared_node_layout(n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = 'writer:{}'.format(i)\n for i in range(n_readers):\n nc.cpu[i + n_writers] = 'reader:{}'.format(i)\n return [nc]\n\n\ndef get_separate_node_layout(n_writers, n_readers):\n nc_w = SummitNode()\n for i in range(n_writers):\n nc_w.cpu[i] = 'writer:{}'.format(i)\n nc_r = SummitNode()\n for i in range(n_readers):\n nc_r.cpu[i] = 'reader:{}'.format(i)\n return [nc_w, nc_r]\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = 
get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n 
params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-5": "from codar.cheetah import Campaign\nfrom codar.cheetah import parameters as p\nfrom codar.savanna.machines import SummitNode\nimport copy\n\ndef get_shared_node_layout (n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = \"writer:{}\".format(i)\n for i in range(n_readers):\n nc.cpu[i+n_writers] = \"reader:{}\".format(i)\n return [nc]\n\ndef get_separate_node_layout (n_writers, n_readers):\n nc_w = SummitNode()\n for i in range(n_writers):\n nc_w.cpu[i] = \"writer:{}\".format(i)\n\n nc_r = SummitNode()\n for i in range(n_readers):\n nc_r.cpu[i] = \"reader:{}\".format(i)\n\n return [nc_w,nc_r]\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values=[n_writers]\n params_d['writer']['decomposition'].values=[n_writers]\n\n all_dicts = []\n all_sweeps = []\n\n # Loop over ratio of the no. of reader ranks\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers//r]\n par_r['reader']['decomposition'].values = [n_writers//r]\n\n # Loop over data size per process\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]\n par_r_d['reader']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]\n\n # Loop over engines\n for e in [\"bp4\",\"sst-rdma\",\"sst-tcp\",\"ssc\",\"insitumpi\"]:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]\n\n all_dicts.append(par_r_d_e)\n\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0] == 8:\n 
shared_node_layout = get_shared_node_layout(32,4)\n elif n_writers//32 < 4096:\n shared_node_layout = get_shared_node_layout(16,16)\n\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters = sweep_params, node_layout = {'summit':sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode='mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode='mpmd'\n\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters = sweep_params, node_layout = {'summit':shared_node_layout}, rc_dependency=rc_dependency)\n\n if n_writers//32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n\n # A name for the campaign\n name = \"ADIOS_IOTEST\"\n\n # A list of the codes that will be part of the workflow\n # If there is an adios xml file associated with the codes, list it here\n codes = [ (\"writer\", dict(exe=\"adios_iotest\")),\n (\"reader\", dict(exe=\"adios_iotest\"))\n ]\n\n # A list of machines that this campaign must be supported on\n supported_machines = ['local', 'theta', 'summit']\n\n # Option to kill an experiment (just one experiment, not the full sweep or campaign) if one of the codes fails\n kill_on_partial_failure = True\n\n # Some pre-processing in the experiment directory\n # This is performed when the campaign directory is created (before the campaign is launched)\n run_dir_setup_script = None\n\n # A post-processing script to be run in the experiment directory after the experiment completes\n # For example, removing some large files after the experiment is done\n run_post_process_script = 'cleanup.sh'\n\n # umask applied to your directory in the campaign so that colleagues can view files\n umask = '027'\n\n # Scheduler information: job queue, account-id etc. 
Leave it to None if running on a local machine\n scheduler_options = {'theta': {'project':'CSC249ADCD01', 'queue': 'batch'},\n 'summit': {'project':'csc303'}}\n\n # Setup your environment. Loading modules, setting the LD_LIBRARY_PATH etc.\n # Ensure this script is executable\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh', 'summit':'env_setup.sh'}\n\n input_files = [\n 'staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt',\n 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt',\n 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml',\n 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml',\n 'staging-perf-test-sst-tcp.xml'\n ]\n\n # Create the sweep parameters for a sweep\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n\n params['writer']['nprocs'] = p.ParamRunner ('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption ('writer', 'appid', '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption ('writer', 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption ('writer', 'scaling', '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption ('writer', 'xmlfile', '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption ('writer', 'decomposition', '-d', [])\n\n params['reader']['nprocs'] = p.ParamRunner ('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption ('reader', 'appid', '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption ('reader', 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption ('reader', 'scaling', '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption ('reader', 'xmlfile', '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption ('reader', 'decomposition', '-d', [])\n\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps (params, 
n*32)\n # pdb.set_trace()\n s_group = p.SweepGroup(\"{}-nodes\".format(n),\n walltime=7200,\n per_run_timeout=600,\n component_inputs={'writer':input_files},\n #nodes=128,\n parameter_groups=group_sweeps,)\n sweeps.append(s_group)\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
@csrf_exempt
def login_form(request):
    """Return the HTML of a simple username/password login form as a string."""
    # assumes the caller embeds this fragment in a full HttpResponse -- TODO confirm
    pieces = (
        '<form action="login" method="POST">',
        'Nombre<br><input type="text" name="Usuario"><br>',
        'Contraseña<br><input type="password" name="Password"><br>',
        '<br><input type="submit" value="Entrar"></form>',
    )
    return ''.join(pieces)
<|reserved_special_token_0|>
def lista_megustas():
    """Render an HTML listing of the five parkings with most 'me gusta'.

    Parkings whose like-counter is still zero are skipped; when none of
    the top five has any likes, a placeholder message is shown instead.
    """
    top_cinco = Aparcamiento.objects.all().order_by('-contador_megusta')[:5]
    Response = 'LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>'
    hay_valorados = False
    for parking in top_cinco:
        if parking.contador_megusta != 0:
            hay_valorados = True
            Response += ('<li><a href=' + parking.content_url + '>' +
                parking.nombre + '<br></a>')
            Response += ('Dirección: ' + parking.clase_vial + ' ' +
                parking.localizacion + ', nº ' + str(parking.num))
            Response += ('<br><a href=http://localhost:1234/aparcamientos/' +
                parking.entidad + '>' + 'Más información<br></a><br>')
    if not hay_valorados:
        Response += (
            'Aún no se han registrado comentarios para ningún aparcamiento')
    Response += '</br></br>'
    return Response
<|reserved_special_token_0|>
def lista_aparcamientos():
    """Return an HTML <li> listing of every parking with an info link."""
    entradas = [
        '<li><p>' + parking.nombre + '<a href="' + parking.entidad +
        '">\t--> Más información</a></p></li>'
        for parking in Aparcamiento.objects.all()
    ]
    return ''.join(entradas)
def aparcamientos_seleccionados(user, request):
    """Build the HTML listing of parkings selected by *user*.

    Returns a ``(lista, seleccionados)`` tuple where ``lista`` is the
    rendered HTML and ``seleccionados`` is the paginator page the
    template iterates over ('' when the user has no Usuario row yet).
    """
    user_object = User.objects.get(username=user)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        # Five selections per page; fall back to page 1 on a missing or
        # non-integer ?page=, and to the last page when out of range.
        paginator = Paginator(lista_seleccionados, 5)
        page = request.GET.get('page')
        try:
            seleccionados = paginator.page(page)
        except PageNotAnInteger:
            seleccionados = paginator.page(1)
        except EmptyPage:
            seleccionados = paginator.page(paginator.num_pages)
        lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'
        for i in seleccionados:
            lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)
            lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i
                .aparcamiento.nombre + '<br></a>')
            lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.
                aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))
            lista += ('<br><a href=http://localhost:1234/aparcamientos/' +
                i.aparcamiento.entidad + '>' + 'Más información</a><br>')
    except ObjectDoesNotExist:
        # No Usuario row for this account yet, i.e. nothing selected.
        lista = 'El usuario aún no ha seleccionado ningún aparcamiento'
        seleccionados = ''
    return lista, seleccionados
def accesibles(value):
    """Return the HTML form holding the 'Accesibles' toggle button.

    ``value`` becomes the button's submitted value (1 = show only the
    accessible parkings on the next POST, 0 = show all of them).
    """
    boton = ('<button type="submit" name="Accesible" value="' + str(value) +
             '"> Accesibles</button></form>')
    return '<form action="" method="POST">' + boton
@csrf_exempt
def pagina_principal(request):
    """Render the main page (index.html).

    GET shows the login form (or a welcome banner for logged-in users),
    the top-5 'me gusta' listing, the personal-page links and the
    'Accesibles' toggle.  A POST coming from that toggle re-renders the
    page with only the accessible parkings (value == '1') or all of them.
    """
    formulario = login_form(request)
    list_megustas = lista_megustas()
    users = paginas_personales()
    value = 1
    accesible = accesibles(value)
    template = get_template('index.html')
    if request.user.is_authenticated():
        username = str(request.user)
        formulario = 'Bienvenido ' + username
        formulario += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    if request.method == 'POST':
        # First '='-separated token of the urlencoded body is the name of
        # the button that was pressed.
        # NOTE(review): a POST whose first field is not 'Accesible' leaves
        # `c` unbound at the render below — confirm all POSTs to this view
        # come from the toggle form.
        key = request.body.decode('utf-8').split('=')[0]
        if key == 'Accesible':
            value = request.POST['Accesible']
            if value == '1':
                # Toggle state 1: list only the currently free parkings and
                # flip the button value to 0 for the next click.
                lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                value = 0
                for i in lista_accesibles:
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '</p><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            else:
                # Toggle state 0: list every parking and reset value to 1.
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                aparcamientos = Aparcamiento.objects.all()
                for aparcamiento in aparcamientos:
                    nombre_aparcamiento = aparcamiento.nombre
                    url_aparcamiento = aparcamiento.entidad
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '. URL del aparcamiento: ' +
                        '<a href="aparcamientos/' + url_aparcamiento +
                        '">\t⇾ Más información</a></br></p>')
                value = 1
            accesible = accesibles(value)
            c = Context({'login': formulario, 'list_users': lista,
                'accesible': accesible})
    else:
        # First GET with an empty database: seed it from the remote feed.
        init = Aparcamiento.objects.all()
        if len(init) == 0:
            get_data()
        c = Context({'login': formulario, 'list': list_megustas,
            'list_users': users, 'accesible': accesible})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
<|reserved_special_token_0|>
@csrf_exempt
def aparcamientos(request):
    """List every parking; a POST filters the listing by district.

    The submitted district is upper-cased before being compared against
    ``Aparcamiento.distrito``; an empty or unmatched filter falls back to
    an explanatory message.
    """
    lista = lista_aparcamientos()
    filtrar = '<form action="" method="POST">'
    filtrar += '<br><br><input type="text" name="distrito">'
    filtrar += '<input type="submit" value="Filtrar por distrito">'
    template = get_template('aparcamientos.html')
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    else:
        form_user = 'Para loguearse vaya al botón de Inicio'
    if request.method == 'POST':
        filtro_distrito = request.POST['distrito']
        filtro_distrito = filtro_distrito.upper()
        if filtro_distrito == '':
            lista_filtrada = (
                'No ha introducido ningún filtro, introduzca distrito para filtrar '
                 + lista)
        else:
            aparcamientos_filtrados = Aparcamiento.objects.all()
            Encontrado = False
            lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +
                ' son: ')
            for i in aparcamientos_filtrados:
                if filtro_distrito == i.distrito:
                    Encontrado = True
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista_filtrada += ('<p>' + nombre_aparcamiento +
                        '</p><li><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            if Encontrado == False:
                # Nothing matched: replace the partial listing with an
                # error message instead of showing an empty header.
                lista_filtrada = ('Introduzca un nuevo distrito. ' +
                    filtro_distrito + ' no es válido')
        c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':
            form_user})
    else:
        c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
@csrf_exempt
def aparcamientos_id(request, recurso):
    """Detail page of the parking whose ``entidad`` equals *recurso*.

    GET renders the parking's full data plus its comments; a POST either
    counts a 'Me Gusta' (the +1 button) or, for logged-in users, stores a
    new comment.  Unknown ids render an error message instead.
    """
    template = get_template('aparcamientos.html')
    num_megustas = 0
    # Bug fix: default the login banner up front.  Previously `form_user`
    # was only assigned inside the try block below, so an unknown
    # `recurso` raised ObjectDoesNotExist before the assignment and the
    # Context({... 'login': form_user}) call crashed with a NameError.
    form_user = 'Para loguearse vaya al botón de Inicio'
    if request.method == 'POST':
        # First '='-separated token of the urlencoded body names the
        # submitted control; the space in "Me Gusta" arrives as '+'.
        key = request.body.decode('utf-8').split('=')[0]
        print(key)
        if key == 'Me+Gusta':
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1
            aparcamiento.save()
            num_megustas = aparcamiento.contador_megusta
        else:
            coment = request.POST['Comentario']
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_coments = aparcamiento.contador_coments + 1
            aparcamiento.save()
            p = Comentario(aparcamiento=aparcamiento, coment=coment)
            p.save()
    try:
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        nombre = aparcamiento.nombre
        descripcion = aparcamiento.descripcion
        accesibilidad = aparcamiento.accesibilidad
        localizacion = aparcamiento.localizacion
        via = aparcamiento.clase_vial
        num = aparcamiento.num
        localidad = aparcamiento.localidad
        provincia = aparcamiento.provincia
        codigo_postal = aparcamiento.codigo_postal
        barrio = aparcamiento.barrio
        distrito = aparcamiento.distrito
        coordenada_x = aparcamiento.coordenada_x
        coordenada_y = aparcamiento.coordenada_y
        telefono = aparcamiento.telefono
        email = aparcamiento.email
        # Empty contact fields are displayed as "No disponible".
        if telefono == '':
            telefono = 'No disponible'
        if email == '':
            email = 'No disponible'
        if accesibilidad == 1:
            acces = 'Libre'
        else:
            acces = 'Ocupado'
        lista_aparcamientos = Aparcamiento.objects.all()
        list_coments = ''
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        num_megustas = aparcamiento.contador_megusta
        for i in lista_aparcamientos:
            if i.entidad == recurso:
                comentarios = Comentario.objects.filter(aparcamiento=i)
                if len(comentarios) != 0:
                    list_coments = '<li><p>COMENTARIOS</p><ol>'
                    for j in comentarios:
                        list_coments += '<li>' + j.coment + '<br>'
                Response = (
                    '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +
                    recurso + '</br></p>')
                Response += ('<a href=' + i.content_url + '>' + i.nombre +
                    '</a><br>')
                Response += 'Descripción: ' + descripcion + '</br>'
                Response += 'Accesibilidad: ' + acces + '</br>'
                Response += ('Localización: ' + via + ' ' + localizacion +
                    ', nº ' + str(num))
                Response += ' ' + localidad + ' (' + str(codigo_postal
                    ) + ')</br>'
                Response += ('Ubicación: ' + barrio + ' ' + distrito +
                    ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(
                    coordenada_y) + '<br><br>')
                Response += 'INFORMACIÓN DE CONTACTO </br>'
                Response += 'Teléfono: ' + telefono + '</br>'
                Response += ('Email: ' + email + '</br>' + list_coments +
                    '</ol>')
        if num_megustas != 0:
            Response += '</br><li>Numero de me gustas es: ' + str(
                num_megustas) + '<br>'
        else:
            Response += (
                '</br><li>Se el primero en indicar que te gusta la página<br>'
                )
        if request.user.is_authenticated():
            # Logged-in visitors additionally get the comment form.
            username = str(request.user)
            form_user = 'Bienvenido ' + username
            form_user += (
                '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
            formulario = '<form action="" method="POST">'
            formulario += (
                '<br>Puede introducir un comentario si lo desea ' + str(
                request.user) + '<br><input type="text" name="Comentario">')
            formulario += '<input type="submit" value="Comentar"></form>'
            Response += formulario
        else:
            form_user = 'Para loguearse vaya al botón de Inicio'
        megusta = ''
        megusta += '<br> Indica que te gusta este aparcamiento</br>'
        megusta += '<form action="" method="POST">'
        megusta += (
            '<button type="submit" name="Me Gusta" value="Me Gusta"> +1 </button></form>'
            )
        Response += megusta
    except ObjectDoesNotExist:
        Response = 'Este id no se corresponde con ningún aparcamiento'
    c = Context({'lista': Response, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@csrf_exempt
def login_form(request):
    """Build the HTML login form shown on the main page.

    ``request`` is accepted for view-compatibility but not used; the
    return value is a raw HTML string, not an HttpResponse.
    """
    fragmentos = (
        '<form action="login" method="POST">',
        'Nombre<br><input type="text" name="Usuario"><br>',
        'Contraseña<br><input type="password" name="Password"><br>',
        '<br><input type="submit" value="Entrar"></form>',
    )
    return ''.join(fragmentos)
@csrf_exempt
def loginuser(request):
    """Authenticate the credentials posted by the login form.

    On success the session is opened and the user is redirected to his
    personal page; on failure 'fail.html' is rendered with an error text.
    """
    user = authenticate(username=request.POST['Usuario'],
                        password=request.POST['Password'])
    if user is None:
        template = get_template('fail.html')
        contexto = Context({
            'Error': 'Por favor, introduzca un usuario y contraseña válidos'})
        return HttpResponse(template.render(contexto))
    login(request, user)
    return redirect('/' + str(user))
def lista_megustas():
    """Render an HTML listing of the five parkings with most 'me gusta'.

    Parkings whose like-counter is still zero are skipped; when none of
    the top five has any likes, a placeholder message is shown instead.
    """
    top_cinco = Aparcamiento.objects.all().order_by('-contador_megusta')[:5]
    Response = 'LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>'
    hay_valorados = False
    for parking in top_cinco:
        if parking.contador_megusta != 0:
            hay_valorados = True
            Response += ('<li><a href=' + parking.content_url + '>' +
                parking.nombre + '<br></a>')
            Response += ('Dirección: ' + parking.clase_vial + ' ' +
                parking.localizacion + ', nº ' + str(parking.num))
            Response += ('<br><a href=http://localhost:1234/aparcamientos/' +
                parking.entidad + '>' + 'Más información<br></a><br>')
    if not hay_valorados:
        Response += (
            'Aún no se han registrado comentarios para ningún aparcamiento')
    Response += '</br></br>'
    return Response
<|reserved_special_token_0|>
def lista_aparcamientos():
    """Return an HTML <li> listing of every parking with an info link."""
    entradas = [
        '<li><p>' + parking.nombre + '<a href="' + parking.entidad +
        '">\t--> Más información</a></p></li>'
        for parking in Aparcamiento.objects.all()
    ]
    return ''.join(entradas)
def aparcamientos_seleccionados(user, request):
    """Build the HTML listing of parkings selected by *user*.

    Returns a ``(lista, seleccionados)`` tuple where ``lista`` is the
    rendered HTML and ``seleccionados`` is the paginator page the
    template iterates over ('' when the user has no Usuario row yet).
    """
    user_object = User.objects.get(username=user)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        # Five selections per page; fall back to page 1 on a missing or
        # non-integer ?page=, and to the last page when out of range.
        paginator = Paginator(lista_seleccionados, 5)
        page = request.GET.get('page')
        try:
            seleccionados = paginator.page(page)
        except PageNotAnInteger:
            seleccionados = paginator.page(1)
        except EmptyPage:
            seleccionados = paginator.page(paginator.num_pages)
        lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'
        for i in seleccionados:
            lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)
            lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i
                .aparcamiento.nombre + '<br></a>')
            lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.
                aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))
            lista += ('<br><a href=http://localhost:1234/aparcamientos/' +
                i.aparcamiento.entidad + '>' + 'Más información</a><br>')
    except ObjectDoesNotExist:
        # No Usuario row for this account yet, i.e. nothing selected.
        lista = 'El usuario aún no ha seleccionado ningún aparcamiento'
        seleccionados = ''
    return lista, seleccionados
def accesibles(value):
    """Return the HTML form holding the 'Accesibles' toggle button.

    ``value`` becomes the button's submitted value (1 = show only the
    accessible parkings on the next POST, 0 = show all of them).
    """
    boton = ('<button type="submit" name="Accesible" value="' + str(value) +
             '"> Accesibles</button></form>')
    return '<form action="" method="POST">' + boton
@csrf_exempt
def pagina_principal(request):
    """Render the main page (index.html).

    GET shows the login form (or a welcome banner for logged-in users),
    the top-5 'me gusta' listing, the personal-page links and the
    'Accesibles' toggle.  A POST coming from that toggle re-renders the
    page with only the accessible parkings (value == '1') or all of them.
    """
    formulario = login_form(request)
    list_megustas = lista_megustas()
    users = paginas_personales()
    value = 1
    accesible = accesibles(value)
    template = get_template('index.html')
    if request.user.is_authenticated():
        username = str(request.user)
        formulario = 'Bienvenido ' + username
        formulario += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    if request.method == 'POST':
        # First '='-separated token of the urlencoded body is the name of
        # the button that was pressed.
        # NOTE(review): a POST whose first field is not 'Accesible' leaves
        # `c` unbound at the render below — confirm all POSTs to this view
        # come from the toggle form.
        key = request.body.decode('utf-8').split('=')[0]
        if key == 'Accesible':
            value = request.POST['Accesible']
            if value == '1':
                # Toggle state 1: list only the currently free parkings and
                # flip the button value to 0 for the next click.
                lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                value = 0
                for i in lista_accesibles:
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '</p><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            else:
                # Toggle state 0: list every parking and reset value to 1.
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                aparcamientos = Aparcamiento.objects.all()
                for aparcamiento in aparcamientos:
                    nombre_aparcamiento = aparcamiento.nombre
                    url_aparcamiento = aparcamiento.entidad
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '. URL del aparcamiento: ' +
                        '<a href="aparcamientos/' + url_aparcamiento +
                        '">\t⇾ Más información</a></br></p>')
                value = 1
            accesible = accesibles(value)
            c = Context({'login': formulario, 'list_users': lista,
                'accesible': accesible})
    else:
        # First GET with an empty database: seed it from the remote feed.
        init = Aparcamiento.objects.all()
        if len(init) == 0:
            get_data()
        c = Context({'login': formulario, 'list': list_megustas,
            'list_users': users, 'accesible': accesible})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
<|reserved_special_token_0|>
@csrf_exempt
def usuarios(request, peticion):
    """Personal page for user *peticion*.

    POST dispatch is keyed on the name of the first field in the body:
    'Titulo' renames the page, 'Seleccionar' bookmarks a parking (at most
    once per parking) and 'Letra' stores the font size/colour later
    served by personalizar().  The private template is used only when the
    authenticated visitor is the page owner; everyone else gets the
    public view.
    """
    formulario = '<form action="" method="POST">'
    formulario += (
        '<br>Introduzca un título nuevo a su página personal<br><input type="text" name="Titulo">'
        )
    formulario += '<input type="submit" value=" Actualizar"></form>'
    css = '<form action="" method="POST">'
    css += 'Modifique el tamaño de letra<br><input type="text" name="Letra">'
    css += (
        '<br><br>Modifique el color de letra\t<input type="color" name="Color"><br>'
        )
    css += '<br><input type="submit" value="Modificar"></form>'
    aparcamientos = Aparcamiento.objects.all()
    lista = '<br>LISTADO DE APARCAMIENTOS<br><br>'
    for aparcamiento in aparcamientos:
        nombre_aparcamiento = aparcamiento.nombre
        lista += nombre_aparcamiento
        lista += '<form action="" method="POST">'
        lista += ('<button type="submit" name="Seleccionar" value="' +
            nombre_aparcamiento + '">Seleccionar</button><br></form>')
    user_object = User.objects.get(username=peticion)
    if request.method == 'POST':
        # The first '='-separated token names the form that was submitted.
        key = request.body.decode('utf-8').split('=')[0]
        if key == 'Titulo':
            titulo = request.POST['Titulo']
            try:
                user = Usuario.objects.get(nombre=user_object)
                user.titulo_pagina = titulo
                user.save()
            except ObjectDoesNotExist:
                # First customisation: create the Usuario row on the fly.
                p = Usuario(nombre=user_object, titulo_pagina=titulo)
                p.save()
        elif key == 'Seleccionar':
            nombre_aparcamiento = request.POST['Seleccionar']
            today = datetime.datetime.today()
            try:
                selector = Usuario.objects.get(nombre=user_object)
                aparcamiento = Aparcamiento.objects.get(nombre=
                    nombre_aparcamiento)
            except:
                # NOTE(review): this bare except also fires when the
                # Aparcamiento lookup fails, which would leave
                # `aparcamiento` unbound for the Seleccionados(...) call
                # below — confirm intended behaviour.
                p = Usuario(nombre=user_object)
                p.save()
                selector = Usuario.objects.get(nombre=user_object)
            Check = False
            lista_usuario = Seleccionados.objects.filter(selector=selector)
            for i in lista_usuario:
                if nombre_aparcamiento == i.aparcamiento.nombre:
                    Check = True
            if Check == False:
                # Only bookmark the parking if not selected before.
                p = Seleccionados(aparcamiento=aparcamiento, selector=
                    selector, fecha_seleccion=today)
                p.save()
        elif key == 'Letra':
            letra = request.POST['Letra']
            color = request.POST['Color']
            try:
                user = Usuario.objects.get(nombre=user_object)
            except:
                p = Usuario(nombre=user_object)
                p.save()
                user = Usuario.objects.get(nombre=user_object)
            # Empty font size falls back to 15.
            if letra == '':
                letra = '15'
            user.letra = letra
            user.color = color
            user.save()
    lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion,
        request)
    if request.user.is_authenticated():
        username = str(request.user)
        if peticion != username:
            # Logged in, but viewing someone else's page: public view.
            template = get_template('publicuser.html')
            titulo_pagina = 'Página pública de ' + peticion + '<br><br>'
            form_user = 'Bienvenido ' + username
            form_user += (
                '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
            c = Context({'lista_selecc': lista_seleccionados,
                'seleccionados': seleccionados, 'titulo': titulo_pagina,
                'login': form_user})
        else:
            # Owner's private view with the edit forms.
            template = get_template('privateuser.html')
            try:
                titulo_pagina = Usuario.objects.get(nombre=user_object
                    ).titulo_pagina
            except ObjectDoesNotExist:
                titulo_pagina = 'Página personal de ' + str(request.user
                    ) + '<br><br>'
            c = Context({'lista_selecc': lista_seleccionados,
                'seleccionados': seleccionados, 'lista': lista, 'form':
                formulario, 'css': css, 'titulo': titulo_pagina})
    else:
        template = get_template('publicuser.html')
        titulo_pagina = 'Página pública de ' + peticion + '<br><br>'
        form_user = 'Para loguearse vaya al botón de Inicio'
        c = Context({'lista_selecc': lista_seleccionados, 'seleccionados':
            seleccionados, 'titulo': titulo_pagina, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def personalizar(request):
    """Serve the dynamic stylesheet with the user's font size and colour.

    Anonymous visitors get the default 14px / #FCFCFC values.
    """
    if request.user.is_authenticated():
        cuenta = User.objects.get(username=request.user)
        preferencias = Usuario.objects.get(nombre=cuenta)
        letra = preferencias.letra
        color = preferencias.color
    else:
        letra = '14px'
        color = '#FCFCFC'
    hoja = get_template('change.css')
    contenido = hoja.render(Context({'letra': letra, 'color': color}))
    return HttpResponse(contenido, content_type='text/css')
<|reserved_special_token_0|>
@csrf_exempt
def aparcamientos(request):
    """List every parking; a POST filters the listing by district.

    The submitted district is upper-cased before being compared against
    ``Aparcamiento.distrito``; an empty or unmatched filter falls back to
    an explanatory message.
    """
    lista = lista_aparcamientos()
    filtrar = '<form action="" method="POST">'
    filtrar += '<br><br><input type="text" name="distrito">'
    filtrar += '<input type="submit" value="Filtrar por distrito">'
    template = get_template('aparcamientos.html')
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    else:
        form_user = 'Para loguearse vaya al botón de Inicio'
    if request.method == 'POST':
        filtro_distrito = request.POST['distrito']
        filtro_distrito = filtro_distrito.upper()
        if filtro_distrito == '':
            lista_filtrada = (
                'No ha introducido ningún filtro, introduzca distrito para filtrar '
                 + lista)
        else:
            aparcamientos_filtrados = Aparcamiento.objects.all()
            Encontrado = False
            lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +
                ' son: ')
            for i in aparcamientos_filtrados:
                if filtro_distrito == i.distrito:
                    Encontrado = True
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista_filtrada += ('<p>' + nombre_aparcamiento +
                        '</p><li><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            if Encontrado == False:
                # Nothing matched: replace the partial listing with an
                # error message instead of showing an empty header.
                lista_filtrada = ('Introduzca un nuevo distrito. ' +
                    filtro_distrito + ' no es válido')
        c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':
            form_user})
    else:
        c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
@csrf_exempt
def aparcamientos_id(request, recurso):
    """Detail page of the parking whose ``entidad`` equals *recurso*.

    GET renders the parking's full data plus its comments; a POST either
    counts a 'Me Gusta' (the +1 button) or, for logged-in users, stores a
    new comment.  Unknown ids render an error message instead.
    """
    template = get_template('aparcamientos.html')
    num_megustas = 0
    # Bug fix: default the login banner up front.  Previously `form_user`
    # was only assigned inside the try block below, so an unknown
    # `recurso` raised ObjectDoesNotExist before the assignment and the
    # Context({... 'login': form_user}) call crashed with a NameError.
    form_user = 'Para loguearse vaya al botón de Inicio'
    if request.method == 'POST':
        # First '='-separated token of the urlencoded body names the
        # submitted control; the space in "Me Gusta" arrives as '+'.
        key = request.body.decode('utf-8').split('=')[0]
        print(key)
        if key == 'Me+Gusta':
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1
            aparcamiento.save()
            num_megustas = aparcamiento.contador_megusta
        else:
            coment = request.POST['Comentario']
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_coments = aparcamiento.contador_coments + 1
            aparcamiento.save()
            p = Comentario(aparcamiento=aparcamiento, coment=coment)
            p.save()
    try:
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        nombre = aparcamiento.nombre
        descripcion = aparcamiento.descripcion
        accesibilidad = aparcamiento.accesibilidad
        localizacion = aparcamiento.localizacion
        via = aparcamiento.clase_vial
        num = aparcamiento.num
        localidad = aparcamiento.localidad
        provincia = aparcamiento.provincia
        codigo_postal = aparcamiento.codigo_postal
        barrio = aparcamiento.barrio
        distrito = aparcamiento.distrito
        coordenada_x = aparcamiento.coordenada_x
        coordenada_y = aparcamiento.coordenada_y
        telefono = aparcamiento.telefono
        email = aparcamiento.email
        # Empty contact fields are displayed as "No disponible".
        if telefono == '':
            telefono = 'No disponible'
        if email == '':
            email = 'No disponible'
        if accesibilidad == 1:
            acces = 'Libre'
        else:
            acces = 'Ocupado'
        lista_aparcamientos = Aparcamiento.objects.all()
        list_coments = ''
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        num_megustas = aparcamiento.contador_megusta
        for i in lista_aparcamientos:
            if i.entidad == recurso:
                comentarios = Comentario.objects.filter(aparcamiento=i)
                if len(comentarios) != 0:
                    list_coments = '<li><p>COMENTARIOS</p><ol>'
                    for j in comentarios:
                        list_coments += '<li>' + j.coment + '<br>'
                Response = (
                    '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +
                    recurso + '</br></p>')
                Response += ('<a href=' + i.content_url + '>' + i.nombre +
                    '</a><br>')
                Response += 'Descripción: ' + descripcion + '</br>'
                Response += 'Accesibilidad: ' + acces + '</br>'
                Response += ('Localización: ' + via + ' ' + localizacion +
                    ', nº ' + str(num))
                Response += ' ' + localidad + ' (' + str(codigo_postal
                    ) + ')</br>'
                Response += ('Ubicación: ' + barrio + ' ' + distrito +
                    ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(
                    coordenada_y) + '<br><br>')
                Response += 'INFORMACIÓN DE CONTACTO </br>'
                Response += 'Teléfono: ' + telefono + '</br>'
                Response += ('Email: ' + email + '</br>' + list_coments +
                    '</ol>')
        if num_megustas != 0:
            Response += '</br><li>Numero de me gustas es: ' + str(
                num_megustas) + '<br>'
        else:
            Response += (
                '</br><li>Se el primero en indicar que te gusta la página<br>'
                )
        if request.user.is_authenticated():
            # Logged-in visitors additionally get the comment form.
            username = str(request.user)
            form_user = 'Bienvenido ' + username
            form_user += (
                '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
            formulario = '<form action="" method="POST">'
            formulario += (
                '<br>Puede introducir un comentario si lo desea ' + str(
                request.user) + '<br><input type="text" name="Comentario">')
            formulario += '<input type="submit" value="Comentar"></form>'
            Response += formulario
        else:
            form_user = 'Para loguearse vaya al botón de Inicio'
        megusta = ''
        megusta += '<br> Indica que te gusta este aparcamiento</br>'
        megusta += '<form action="" method="POST">'
        megusta += (
            '<button type="submit" name="Me Gusta" value="Me Gusta"> +1 </button></form>'
            )
        Response += megusta
    except ObjectDoesNotExist:
        Response = 'Este id no se corresponde con ningún aparcamiento'
    c = Context({'lista': Response, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def about(request):
    """Render the static help/description page (about.html).

    The body is a hand-built HTML string documenting every section of the
    application; nothing is read from the request.
    """
    template = get_template('about.html')
    Cuerpo = 'DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>'
    Cuerpo += (
        '------------------------------------ Página principal ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>'
        )
    Cuerpo += (
        "<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>"
        )
    Cuerpo += (
        '<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>'
        )
    Cuerpo += (
        '<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Página con los aparcamientos ---------------------------------------------------'
        )
    Cuerpo += (
        "<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>"
        )
    Cuerpo += (
        "<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>"
        )
    Cuerpo += (
        '<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Interfaz pública de usuario ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>'
        )
    Cuerpo += (
        '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Interfaz privada de usuario ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>'
        )
    Cuerpo += (
        "<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>"
        )
    Cuerpo += (
        '<li> Formulario para cambiar el título de su página personal.</li>')
    Cuerpo += (
        '<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>'
        )
    Cuerpo += (
        "<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>"
        )
    Cuerpo += (
        '------------------------------------ Pie de pagina ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>'
        )
    Cuerpo += (
        '<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Página XML de un usuario ---------------------------------------------------'
        )
    Cuerpo += (
        "<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>"
        )
    c = Context({'lista': Cuerpo})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@csrf_exempt
def login_form(request):
    """Build the HTML login form shown on the main page.

    ``request`` is accepted for view-compatibility but not used; the
    return value is a raw HTML string, not an HttpResponse.
    """
    fragmentos = (
        '<form action="login" method="POST">',
        'Nombre<br><input type="text" name="Usuario"><br>',
        'Contraseña<br><input type="password" name="Password"><br>',
        '<br><input type="submit" value="Entrar"></form>',
    )
    return ''.join(fragmentos)
@csrf_exempt
def loginuser(request):
    """Authenticate the credentials posted by the login form.

    On success the session is opened and the user is redirected to his
    personal page; on failure 'fail.html' is rendered with an error text.
    """
    user = authenticate(username=request.POST['Usuario'],
                        password=request.POST['Password'])
    if user is None:
        template = get_template('fail.html')
        contexto = Context({
            'Error': 'Por favor, introduzca un usuario y contraseña válidos'})
        return HttpResponse(template.render(contexto))
    login(request, user)
    return redirect('/' + str(user))
def lista_megustas():
    """Render an HTML listing of the five parkings with most 'me gusta'.

    Parkings whose like-counter is still zero are skipped; when none of
    the top five has any likes, a placeholder message is shown instead.
    """
    top_cinco = Aparcamiento.objects.all().order_by('-contador_megusta')[:5]
    Response = 'LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>'
    hay_valorados = False
    for parking in top_cinco:
        if parking.contador_megusta != 0:
            hay_valorados = True
            Response += ('<li><a href=' + parking.content_url + '>' +
                parking.nombre + '<br></a>')
            Response += ('Dirección: ' + parking.clase_vial + ' ' +
                parking.localizacion + ', nº ' + str(parking.num))
            Response += ('<br><a href=http://localhost:1234/aparcamientos/' +
                parking.entidad + '>' + 'Más información<br></a><br>')
    if not hay_valorados:
        Response += (
            'Aún no se han registrado comentarios para ningún aparcamiento')
    Response += '</br></br>'
    return Response
<|reserved_special_token_0|>
def lista_aparcamientos():
    """Return an HTML <li> listing of every parking with an info link."""
    entradas = [
        '<li><p>' + parking.nombre + '<a href="' + parking.entidad +
        '">\t--> Más información</a></p></li>'
        for parking in Aparcamiento.objects.all()
    ]
    return ''.join(entradas)
def aparcamientos_seleccionados(user, request):
    """Build the HTML listing of parkings selected by *user*.

    Returns a ``(lista, seleccionados)`` tuple where ``lista`` is the
    rendered HTML and ``seleccionados`` is the paginator page the
    template iterates over ('' when the user has no Usuario row yet).
    """
    user_object = User.objects.get(username=user)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        # Five selections per page; fall back to page 1 on a missing or
        # non-integer ?page=, and to the last page when out of range.
        paginator = Paginator(lista_seleccionados, 5)
        page = request.GET.get('page')
        try:
            seleccionados = paginator.page(page)
        except PageNotAnInteger:
            seleccionados = paginator.page(1)
        except EmptyPage:
            seleccionados = paginator.page(paginator.num_pages)
        lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'
        for i in seleccionados:
            lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)
            lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i
                .aparcamiento.nombre + '<br></a>')
            lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.
                aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))
            lista += ('<br><a href=http://localhost:1234/aparcamientos/' +
                i.aparcamiento.entidad + '>' + 'Más información</a><br>')
    except ObjectDoesNotExist:
        # No Usuario row for this account yet, i.e. nothing selected.
        lista = 'El usuario aún no ha seleccionado ningún aparcamiento'
        seleccionados = ''
    return lista, seleccionados
def accesibles(value):
    """Return the HTML form holding the 'Accesibles' toggle button.

    ``value`` becomes the button's submitted value (1 = show only the
    accessible parkings on the next POST, 0 = show all of them).
    """
    boton = ('<button type="submit" name="Accesible" value="' + str(value) +
             '"> Accesibles</button></form>')
    return '<form action="" method="POST">' + boton
@csrf_exempt
def pagina_principal(request):
    """Render the main page (index.html).

    GET shows the login form (or a welcome banner for logged-in users),
    the top-5 'me gusta' listing, the personal-page links and the
    'Accesibles' toggle.  A POST coming from that toggle re-renders the
    page with only the accessible parkings (value == '1') or all of them.
    """
    formulario = login_form(request)
    list_megustas = lista_megustas()
    users = paginas_personales()
    value = 1
    accesible = accesibles(value)
    template = get_template('index.html')
    if request.user.is_authenticated():
        username = str(request.user)
        formulario = 'Bienvenido ' + username
        formulario += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    if request.method == 'POST':
        # First '='-separated token of the urlencoded body is the name of
        # the button that was pressed.
        # NOTE(review): a POST whose first field is not 'Accesible' leaves
        # `c` unbound at the render below — confirm all POSTs to this view
        # come from the toggle form.
        key = request.body.decode('utf-8').split('=')[0]
        if key == 'Accesible':
            value = request.POST['Accesible']
            if value == '1':
                # Toggle state 1: list only the currently free parkings and
                # flip the button value to 0 for the next click.
                lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                value = 0
                for i in lista_accesibles:
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '</p><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            else:
                # Toggle state 0: list every parking and reset value to 1.
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                aparcamientos = Aparcamiento.objects.all()
                for aparcamiento in aparcamientos:
                    nombre_aparcamiento = aparcamiento.nombre
                    url_aparcamiento = aparcamiento.entidad
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '. URL del aparcamiento: ' +
                        '<a href="aparcamientos/' + url_aparcamiento +
                        '">\t⇾ Más información</a></br></p>')
                value = 1
            accesible = accesibles(value)
            c = Context({'login': formulario, 'list_users': lista,
                'accesible': accesible})
    else:
        # First GET with an empty database: seed it from the remote feed.
        init = Aparcamiento.objects.all()
        if len(init) == 0:
            get_data()
        c = Context({'login': formulario, 'list': list_megustas,
            'list_users': users, 'accesible': accesible})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
<|reserved_special_token_0|>
@csrf_exempt
def usuarios(request, peticion):
    """Personal page view for user *peticion*.

    Renders the public page when the viewer is anonymous or a different
    user, and the private page (with edit forms) when *peticion* is the
    logged-in user.  POST body keys handled:

    * ``Titulo``      -- change the personal page title.
    * ``Seleccionar`` -- add a parking to the user's selection.
    * ``Letra``       -- change font size and colour.
    """
    # Form to change the personal page title.
    formulario = '<form action="" method="POST">'
    formulario += (
        '<br>Introduzca un título nuevo a su página personal<br><input type="text" name="Titulo">'
        )
    formulario += '<input type="submit" value=" Actualizar"></form>'
    # Form to change font size/colour of the whole site.
    css = '<form action="" method="POST">'
    css += 'Modifique el tamaño de letra<br><input type="text" name="Letra">'
    css += (
        '<br><br>Modifique el color de letra\t<input type="color" name="Color"><br>'
        )
    css += '<br><input type="submit" value="Modificar"></form>'
    # Listing of every parking, each with its own "Seleccionar" button.
    aparcamientos = Aparcamiento.objects.all()
    lista = '<br>LISTADO DE APARCAMIENTOS<br><br>'
    for aparcamiento in aparcamientos:
        nombre_aparcamiento = aparcamiento.nombre
        lista += nombre_aparcamiento
        lista += '<form action="" method="POST">'
        lista += ('<button type="submit" name="Seleccionar" value="' +
            nombre_aparcamiento + '">Seleccionar</button><br></form>')
    user_object = User.objects.get(username=peticion)
    if request.method == 'POST':
        # The part of the urlencoded body before '=' identifies the form.
        key = request.body.decode('utf-8').split('=')[0]
        if key == 'Titulo':
            titulo = request.POST['Titulo']
            try:
                user = Usuario.objects.get(nombre=user_object)
                user.titulo_pagina = titulo
                user.save()
            except ObjectDoesNotExist:
                # First customisation: create the profile on the fly.
                p = Usuario(nombre=user_object, titulo_pagina=titulo)
                p.save()
        elif key == 'Seleccionar':
            nombre_aparcamiento = request.POST['Seleccionar']
            today = datetime.datetime.today()
            try:
                selector = Usuario.objects.get(nombre=user_object)
                aparcamiento = Aparcamiento.objects.get(nombre=
                    nombre_aparcamiento)
            except:
                # NOTE(review): bare except.  If this branch runs because the
                # Usuario lookup failed, `aparcamiento` is left unbound and
                # the Seleccionados(...) call below raises NameError --
                # confirm this path is exercised.
                p = Usuario(nombre=user_object)
                p.save()
                selector = Usuario.objects.get(nombre=user_object)
        Check = False
        # Skip the insert when the parking was already selected.
        lista_usuario = Seleccionados.objects.filter(selector=selector)
        for i in lista_usuario:
            if nombre_aparcamiento == i.aparcamiento.nombre:
                Check = True
        if Check == False:
            p = Seleccionados(aparcamiento=aparcamiento, selector=
                selector, fecha_seleccion=today)
            p.save()
        elif key == 'Letra':
            letra = request.POST['Letra']
            color = request.POST['Color']
            try:
                user = Usuario.objects.get(nombre=user_object)
            except:
                p = Usuario(nombre=user_object)
                p.save()
                user = Usuario.objects.get(nombre=user_object)
            if letra == '':
                # Empty font size falls back to a default of 15.
                letra = '15'
            user.letra = letra
            user.color = color
            user.save()
    lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion,
        request)
    if request.user.is_authenticated():
        username = str(request.user)
        if peticion != username:
            # Viewing someone else's page: public view.
            template = get_template('publicuser.html')
            titulo_pagina = 'Página pública de ' + peticion + '<br><br>'
            form_user = 'Bienvenido ' + username
            form_user += (
                '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
            c = Context({'lista_selecc': lista_seleccionados,
                'seleccionados': seleccionados, 'titulo': titulo_pagina,
                'login': form_user})
        else:
            # The owner's own page: private view with the edit forms.
            template = get_template('privateuser.html')
            try:
                titulo_pagina = Usuario.objects.get(nombre=user_object
                    ).titulo_pagina
            except ObjectDoesNotExist:
                titulo_pagina = 'Página personal de ' + str(request.user
                    ) + '<br><br>'
            c = Context({'lista_selecc': lista_seleccionados,
                'seleccionados': seleccionados, 'lista': lista, 'form':
                formulario, 'css': css, 'titulo': titulo_pagina})
    else:
        template = get_template('publicuser.html')
        titulo_pagina = 'Página pública de ' + peticion + '<br><br>'
        form_user = 'Para loguearse vaya al botón de Inicio'
        c = Context({'lista_selecc': lista_seleccionados, 'seleccionados':
            seleccionados, 'titulo': titulo_pagina, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def personalizar(request):
    """Render the dynamic stylesheet with the user's font size and colour.

    Logged-in users get the values stored on their Usuario profile;
    anonymous visitors get the site defaults.
    """
    if not request.user.is_authenticated():
        letra, color = '14px', '#FCFCFC'
    else:
        perfil = Usuario.objects.get(
            nombre=User.objects.get(username=request.user))
        letra, color = perfil.letra, perfil.color
    plantilla = get_template('change.css')
    contexto = Context({'letra': letra, 'color': color})
    return HttpResponse(plantilla.render(contexto), content_type='text/css')
def usuarios_xml(request, peticion):
    """Serve the parkings selected by user *peticion* as an XML document.

    Builds a <Contenidos> tree with an <infoDataset> header and one
    <Contenido> entry per selected parking.  If the user has no Usuario
    profile (or any record lookup fails) the <Aparcamientos> element is
    simply left empty, keeping the original best-effort behaviour.

    Changes vs. the original: the sixteen duplicated create/append/set
    stanzas are folded into the ``_add_atributo`` helper; a stray empty
    <atributo/> element that was appended just before DATOSDECONTACTO
    (copy-paste residue, it carried no attribute and no text) is removed;
    the bare ``except:`` is narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """

    def _add_atributo(parent, nombre, valor):
        # Append one <atributo nombre="..."> element with a text payload.
        atributo = doc.createElement('atributo')
        parent.appendChild(atributo)
        atributo.setAttribute('nombre', nombre)
        atributo.appendChild(doc.createTextNode(valor))

    user_object = User.objects.get(username=peticion)
    doc = Document()
    cont = doc.createElement('Contenidos')
    doc.appendChild(cont)
    info = doc.createElement('infoDataset')
    cont.appendChild(info)
    nombre = doc.createElement('Nombre')
    info.appendChild(nombre)
    nombre.appendChild(doc.createTextNode(
        'XML de aparcamientos seleccionados por el usuario ' + peticion))
    url = doc.createElement('url')
    info.appendChild(url)
    url.appendChild(doc.createTextNode(
        'http://localhost:1234/' + peticion + '/xml/'))
    aparc = doc.createElement('Aparcamientos')
    cont.appendChild(aparc)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        for i in lista_seleccionados:
            item = doc.createElement('Contenido')
            aparc.appendChild(item)
            a = i.aparcamiento
            _add_atributo(item, 'ID-ENTIDAD', a.entidad)
            _add_atributo(item, 'NOMBRE', a.nombre)
            _add_atributo(item, 'DESCRIPCION', a.descripcion)
            # Accessibility is serialized as "1"/"0" like the source dataset.
            _add_atributo(item, 'ACCESIBILIDAD',
                          str(1 if a.accesibilidad else 0))
            _add_atributo(item, 'CONTENT_URL', a.content_url)
            _add_atributo(item, 'LOCALIZACION', a.localizacion)
            _add_atributo(item, 'CLASE VIAL', a.clase_vial)
            _add_atributo(item, 'TIPO NUM', a.tipo_num)
            _add_atributo(item, 'NUM', str(a.num))
            _add_atributo(item, 'LOCALIDAD', a.localidad)
            _add_atributo(item, 'PROVINCIA', a.provincia)
            _add_atributo(item, 'CODIGO POSTAL', str(a.codigo_postal))
            _add_atributo(item, 'BARRIO', a.barrio)
            _add_atributo(item, 'DISTRITO', a.distrito)
            _add_atributo(item, 'COORDENADA X', str(a.coordenada_x))
            _add_atributo(item, 'COORDENADA Y', str(a.coordenada_y))
            # Contact details live in their own nested element.
            datos = doc.createElement('DATOSDECONTACTO')
            item.appendChild(datos)
            _add_atributo(datos, 'TELEFONO', a.telefono)
            _add_atributo(datos, 'EMAIL', a.email)
    except Exception:
        # Best-effort: a missing profile or a bad record leaves the
        # <Aparcamientos> element empty instead of erroring out.
        pass
    xml = doc.toprettyxml(indent='    ')
    return HttpResponse(xml, content_type='text/xml')
@csrf_exempt
def aparcamientos(request):
    """Listing page for every parking, with a filter-by-district form.

    A POST carries the ``distrito`` field; matching is done against the
    upper-cased district name.  An empty or unknown district shows an
    explanatory message instead of a listing.
    """
    lista = lista_aparcamientos()
    # District filter form (rendered by the template).
    filtrar = '<form action="" method="POST">'
    filtrar += '<br><br><input type="text" name="distrito">'
    filtrar += '<input type="submit" value="Filtrar por distrito">'
    template = get_template('aparcamientos.html')
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    else:
        form_user = 'Para loguearse vaya al botón de Inicio'
    if request.method == 'POST':
        filtro_distrito = request.POST['distrito']
        # District names appear upper-case in the dataset, so normalise
        # the user's input before comparing.
        filtro_distrito = filtro_distrito.upper()
        if filtro_distrito == '':
            lista_filtrada = (
                'No ha introducido ningún filtro, introduzca distrito para filtrar '
                 + lista)
        else:
            aparcamientos_filtrados = Aparcamiento.objects.all()
            Encontrado = False
            lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +
                ' son: ')
            for i in aparcamientos_filtrados:
                if filtro_distrito == i.distrito:
                    Encontrado = True
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista_filtrada += ('<p>' + nombre_aparcamiento +
                        '</p><li><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            if Encontrado == False:
                # No parking matched: replace the listing with a hint.
                lista_filtrada = ('Introduzca un nuevo distrito. ' +
                    filtro_distrito + ' no es válido')
        c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':
            form_user})
    else:
        c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
@csrf_exempt
def aparcamientos_id(request, recurso):
    """Detail page for the parking whose entidad id is *recurso*.

    GET renders the parking's full record plus its comments.  A POST
    whose urlencoded body key is ``Me+Gusta`` increments the like
    counter; any other POST is treated as a new comment submitted from
    the form shown to logged-in users.

    Fix vs. the original: ``form_user`` is initialised before the try
    block.  Previously, when *recurso* matched no Aparcamiento, the
    except branch reached the Context() call with ``form_user`` never
    assigned and raised NameError.  The unused locals ``nombre`` and
    ``provincia`` were also dropped.
    """
    template = get_template('aparcamientos.html')
    num_megustas = 0
    # Default login box; overwritten below when the user is authenticated.
    form_user = 'Para loguearse vaya al botón de Inicio'
    if request.method == 'POST':
        key = request.body.decode('utf-8').split('=')[0]
        print(key)
        if key == 'Me+Gusta':
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1
            aparcamiento.save()
            num_megustas = aparcamiento.contador_megusta
        else:
            # Any other POST comes from the comment form.
            coment = request.POST['Comentario']
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_coments = aparcamiento.contador_coments + 1
            aparcamiento.save()
            p = Comentario(aparcamiento=aparcamiento, coment=coment)
            p.save()
    try:
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        descripcion = aparcamiento.descripcion
        accesibilidad = aparcamiento.accesibilidad
        localizacion = aparcamiento.localizacion
        via = aparcamiento.clase_vial
        num = aparcamiento.num
        localidad = aparcamiento.localidad
        codigo_postal = aparcamiento.codigo_postal
        barrio = aparcamiento.barrio
        distrito = aparcamiento.distrito
        coordenada_x = aparcamiento.coordenada_x
        coordenada_y = aparcamiento.coordenada_y
        telefono = aparcamiento.telefono
        email = aparcamiento.email
        # Blank contact fields are shown as "No disponible".
        if telefono == '':
            telefono = 'No disponible'
        if email == '':
            email = 'No disponible'
        if accesibilidad == 1:
            acces = 'Libre'
        else:
            acces = 'Ocupado'
        lista_aparcamientos = Aparcamiento.objects.all()
        list_coments = ''
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        num_megustas = aparcamiento.contador_megusta
        for i in lista_aparcamientos:
            if i.entidad == recurso:
                comentarios = Comentario.objects.filter(aparcamiento=i)
                if len(comentarios) != 0:
                    list_coments = '<li><p>COMENTARIOS</p><ol>'
                    for j in comentarios:
                        list_coments += '<li>' + j.coment + '<br>'
                Response = (
                    '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +
                    recurso + '</br></p>')
                Response += ('<a href=' + i.content_url + '>' + i.nombre +
                    '</a><br>')
                Response += 'Descripción: ' + descripcion + '</br>'
                Response += 'Accesibilidad: ' + acces + '</br>'
                Response += ('Localización: ' + via + ' ' + localizacion +
                    ', nº ' + str(num))
                Response += ' ' + localidad + ' (' + str(codigo_postal
                    ) + ')</br>'
                Response += ('Ubicación: ' + barrio + ' ' + distrito +
                    ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(
                    coordenada_y) + '<br><br>')
                Response += 'INFORMACIÓN DE CONTACTO </br>'
                Response += 'Teléfono: ' + telefono + '</br>'
                Response += ('Email: ' + email + '</br>' + list_coments +
                    '</ol>')
        if num_megustas != 0:
            Response += '</br><li>Numero de me gustas es: ' + str(
                num_megustas) + '<br>'
        else:
            Response += (
                '</br><li>Se el primero en indicar que te gusta la página<br>'
                )
        if request.user.is_authenticated():
            username = str(request.user)
            form_user = 'Bienvenido ' + username
            form_user += (
                '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
            # Only logged-in users get the comment form.
            formulario = '<form action="" method="POST">'
            formulario += (
                '<br>Puede introducir un comentario si lo desea ' + str(
                request.user) + '<br><input type="text" name="Comentario">')
            formulario += '<input type="submit" value="Comentar"></form>'
            Response += formulario
        else:
            form_user = 'Para loguearse vaya al botón de Inicio'
        megusta = ''
        megusta += '<br> Indica que te gusta este aparcamiento</br>'
        megusta += '<form action="" method="POST">'
        megusta += (
            '<button type="submit" name="Me Gusta" value="Me Gusta"> +1 </button></form>'
            )
        Response += megusta
    except ObjectDoesNotExist:
        Response = 'Este id no se corresponde con ningún aparcamiento'
    c = Context({'lista': Response, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def about(request):
    """Static help page describing every section of the application.

    The body is a fixed block of HTML help text rendered through the
    about.html template; nothing here depends on the request.
    """
    template = get_template('about.html')
    Cuerpo = 'DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>'
    Cuerpo += (
        '------------------------------------ Página principal ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>'
        )
    Cuerpo += (
        "<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>"
        )
    Cuerpo += (
        '<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>'
        )
    Cuerpo += (
        '<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Página con los aparcamientos ---------------------------------------------------'
        )
    Cuerpo += (
        "<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>"
        )
    Cuerpo += (
        "<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>"
        )
    Cuerpo += (
        '<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Interfaz pública de usuario ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>'
        )
    Cuerpo += (
        '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Interfaz privada de usuario ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>'
        )
    Cuerpo += (
        "<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>"
        )
    Cuerpo += (
        '<li> Formulario para cambiar el título de su página personal.</li>')
    Cuerpo += (
        '<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>'
        )
    Cuerpo += (
        "<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>"
        )
    Cuerpo += (
        '------------------------------------ Pie de pagina ---------------------------------------------------'
        )
    Cuerpo += (
        '<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>'
        )
    Cuerpo += (
        '<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>'
        )
    Cuerpo += (
        '------------------------------------ Página XML de un usuario ---------------------------------------------------'
        )
    Cuerpo += (
        "<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>"
        )
    c = Context({'lista': Cuerpo})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@csrf_exempt
def login_form(request):
    """Return the HTML login form shown on the main page.

    The *request* argument is accepted for symmetry with the other view
    helpers but is not used here.
    """
    campos = [
        '<form action="login" method="POST">',
        'Nombre<br><input type="text" name="Usuario"><br>',
        'Contraseña<br><input type="password" name="Password"><br>',
        '<br><input type="submit" value="Entrar"></form>',
    ]
    return ''.join(campos)
@csrf_exempt
def loginuser(request):
    """Handle the login POST: authenticate and redirect to the user page.

    On success the user lands on /<username>; on failure the fail.html
    template is rendered with an error message.
    """
    user = authenticate(
        username=request.POST['Usuario'],
        password=request.POST['Password'])
    if user is None:
        plantilla = get_template('fail.html')
        contexto = Context({'Error':
            'Por favor, introduzca un usuario y contraseña válidos'})
        return HttpResponse(plantilla.render(contexto))
    login(request, user)
    return redirect('/' + str(user))
def lista_megustas():
    """Build the HTML listing of the five most-liked parkings.

    Parkings with zero likes are skipped; if none has likes yet a
    placeholder sentence is appended instead.
    """
    top_cinco = Aparcamiento.objects.all().order_by('-contador_megusta')[:5]
    piezas = ['LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>']
    alguno = False
    for aparc in top_cinco:
        if aparc.contador_megusta == 0:
            continue
        alguno = True
        piezas.append('<li><a href=' + aparc.content_url + '>' +
                      aparc.nombre + '<br></a>')
        piezas.append('Dirección: ' + aparc.clase_vial + ' ' +
                      aparc.localizacion + ', nº ' + str(aparc.num))
        piezas.append('<br><a href=http://localhost:1234/aparcamientos/' +
                      aparc.entidad + '>' + 'Más información<br></a><br>')
    if not alguno:
        piezas.append(
            'Aún no se han registrado comentarios para ningún aparcamiento')
    piezas.append('</br></br>')
    return ''.join(piezas)
<|reserved_special_token_0|>
def lista_aparcamientos():
    """Return an HTML <li> listing of every registered parking.

    Each entry links to the parking's detail page via its entidad id.
    """
    items = [
        '<li><p>' + aparc.nombre + '<a href="' + aparc.entidad +
        '">\t--> Más información</a></p></li>'
        for aparc in Aparcamiento.objects.all()
    ]
    return ''.join(items)
def aparcamientos_seleccionados(user, request):
    """Return (html_listing, page_object) of the parkings *user* selected.

    The selections are paginated five per page; the page number comes
    from the ``page`` GET parameter.  If the user has no Usuario profile
    a placeholder message and an empty string are returned instead.
    """
    user_object = User.objects.get(username=user)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        # Show the selections five at a time.
        paginator = Paginator(lista_seleccionados, 5)
        page = request.GET.get('page')
        try:
            seleccionados = paginator.page(page)
        except PageNotAnInteger:
            # No/invalid page parameter: fall back to the first page.
            seleccionados = paginator.page(1)
        except EmptyPage:
            # Page number beyond the end: clamp to the last page.
            seleccionados = paginator.page(paginator.num_pages)
        lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'
        for i in seleccionados:
            lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)
            lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i
                .aparcamiento.nombre + '<br></a>')
            lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.
                aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))
            lista += ('<br><a href=http://localhost:1234/aparcamientos/' +
                i.aparcamiento.entidad + '>' + 'Más información</a><br>')
    except ObjectDoesNotExist:
        lista = 'El usuario aún no ha seleccionado ningún aparcamiento'
        seleccionados = ''
    return lista, seleccionados
def accesibles(value):
    """Build the HTML form holding the 'Accesibles' toggle button.

    The current toggle state is embedded as the button's value so the
    POST handler can tell which listing to render next.
    """
    parts = (
        '<form action="" method="POST">',
        '<button type="submit" name="Accesible" value="',
        str(value),
        '"> Accesibles</button></form>',
    )
    return ''.join(parts)
@csrf_exempt
def pagina_principal(request):
    """Main page view.

    GET renders the login box, the top-liked parkings and the personal
    pages listing; on the very first visit (empty database) it triggers
    the initial data load.  A POST whose body key is ``Accesible``
    toggles between the accessible-only listing and the full listing.
    """
    formulario = login_form(request)
    list_megustas = lista_megustas()
    users = paginas_personales()
    # value drives the Accesibles toggle button: '1' -> next press shows
    # only accessible parkings.
    value = 1
    accesible = accesibles(value)
    template = get_template('index.html')
    if request.user.is_authenticated():
        # Logged-in users see a greeting plus a logout link instead of
        # the login form.
        username = str(request.user)
        formulario = 'Bienvenido ' + username
        formulario += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    if request.method == 'POST':
        # The urlencoded body looks like "Accesible=<value>"; the text
        # before '=' identifies which form was submitted.
        key = request.body.decode('utf-8').split('=')[0]
        # NOTE(review): a POST whose key is not 'Accesible' leaves `c`
        # unbound and template.render(c) below raises NameError --
        # confirm every POST to this view comes from the Accesibles form.
        if key == 'Accesible':
            value = request.POST['Accesible']
            if value == '1':
                # Show only parkings currently marked accessible.
                lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                value = 0
                for i in lista_accesibles:
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '</p><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            else:
                # Second press of the toggle: list every parking.
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                aparcamientos = Aparcamiento.objects.all()
                for aparcamiento in aparcamientos:
                    nombre_aparcamiento = aparcamiento.nombre
                    url_aparcamiento = aparcamiento.entidad
                    lista += ('<li><p>' + nombre_aparcamiento +
                        '. URL del aparcamiento: ' +
                        '<a href="aparcamientos/' + url_aparcamiento +
                        '">\t⇾ Más información</a></br></p>')
                value = 1
            accesible = accesibles(value)
            c = Context({'login': formulario, 'list_users': lista,
                'accesible': accesible})
    else:
        # First GET against an empty database: fetch the source dataset.
        init = Aparcamiento.objects.all()
        if len(init) == 0:
            get_data()
        c = Context({'login': formulario, 'list': list_megustas,
            'list_users': users, 'accesible': accesible})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def mylogout(request):
    """End the current session and redirect back to the main page."""
    logout(request)
    return redirect('/')
@csrf_exempt
def usuarios(request, peticion):
    """Personal page view for user *peticion*.

    Renders the public page when the viewer is anonymous or a different
    user, and the private page (with edit forms) when *peticion* is the
    logged-in user.  POST body keys handled:

    * ``Titulo``      -- change the personal page title.
    * ``Seleccionar`` -- add a parking to the user's selection.
    * ``Letra``       -- change font size and colour.
    """
    # Form to change the personal page title.
    formulario = '<form action="" method="POST">'
    formulario += (
        '<br>Introduzca un título nuevo a su página personal<br><input type="text" name="Titulo">'
        )
    formulario += '<input type="submit" value=" Actualizar"></form>'
    # Form to change font size/colour of the whole site.
    css = '<form action="" method="POST">'
    css += 'Modifique el tamaño de letra<br><input type="text" name="Letra">'
    css += (
        '<br><br>Modifique el color de letra\t<input type="color" name="Color"><br>'
        )
    css += '<br><input type="submit" value="Modificar"></form>'
    # Listing of every parking, each with its own "Seleccionar" button.
    aparcamientos = Aparcamiento.objects.all()
    lista = '<br>LISTADO DE APARCAMIENTOS<br><br>'
    for aparcamiento in aparcamientos:
        nombre_aparcamiento = aparcamiento.nombre
        lista += nombre_aparcamiento
        lista += '<form action="" method="POST">'
        lista += ('<button type="submit" name="Seleccionar" value="' +
            nombre_aparcamiento + '">Seleccionar</button><br></form>')
    user_object = User.objects.get(username=peticion)
    if request.method == 'POST':
        # The part of the urlencoded body before '=' identifies the form.
        key = request.body.decode('utf-8').split('=')[0]
        if key == 'Titulo':
            titulo = request.POST['Titulo']
            try:
                user = Usuario.objects.get(nombre=user_object)
                user.titulo_pagina = titulo
                user.save()
            except ObjectDoesNotExist:
                # First customisation: create the profile on the fly.
                p = Usuario(nombre=user_object, titulo_pagina=titulo)
                p.save()
        elif key == 'Seleccionar':
            nombre_aparcamiento = request.POST['Seleccionar']
            today = datetime.datetime.today()
            try:
                selector = Usuario.objects.get(nombre=user_object)
                aparcamiento = Aparcamiento.objects.get(nombre=
                    nombre_aparcamiento)
            except:
                # NOTE(review): bare except.  If this branch runs because the
                # Usuario lookup failed, `aparcamiento` is left unbound and
                # the Seleccionados(...) call below raises NameError --
                # confirm this path is exercised.
                p = Usuario(nombre=user_object)
                p.save()
                selector = Usuario.objects.get(nombre=user_object)
            Check = False
            # Skip the insert when the parking was already selected.
            lista_usuario = Seleccionados.objects.filter(selector=selector)
            for i in lista_usuario:
                if nombre_aparcamiento == i.aparcamiento.nombre:
                    Check = True
            if Check == False:
                p = Seleccionados(aparcamiento=aparcamiento, selector=
                    selector, fecha_seleccion=today)
                p.save()
        elif key == 'Letra':
            letra = request.POST['Letra']
            color = request.POST['Color']
            try:
                user = Usuario.objects.get(nombre=user_object)
            except:
                p = Usuario(nombre=user_object)
                p.save()
                user = Usuario.objects.get(nombre=user_object)
            if letra == '':
                # Empty font size falls back to a default of 15.
                letra = '15'
            user.letra = letra
            user.color = color
            user.save()
    lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion,
        request)
    if request.user.is_authenticated():
        username = str(request.user)
        if peticion != username:
            # Viewing someone else's page: public view.
            template = get_template('publicuser.html')
            titulo_pagina = 'Página pública de ' + peticion + '<br><br>'
            form_user = 'Bienvenido ' + username
            form_user += (
                '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
            c = Context({'lista_selecc': lista_seleccionados,
                'seleccionados': seleccionados, 'titulo': titulo_pagina,
                'login': form_user})
        else:
            # The owner's own page: private view with the edit forms.
            template = get_template('privateuser.html')
            try:
                titulo_pagina = Usuario.objects.get(nombre=user_object
                    ).titulo_pagina
            except ObjectDoesNotExist:
                titulo_pagina = 'Página personal de ' + str(request.user
                    ) + '<br><br>'
            c = Context({'lista_selecc': lista_seleccionados,
                'seleccionados': seleccionados, 'lista': lista, 'form':
                formulario, 'css': css, 'titulo': titulo_pagina})
    else:
        template = get_template('publicuser.html')
        titulo_pagina = 'Página pública de ' + peticion + '<br><br>'
        form_user = 'Para loguearse vaya al botón de Inicio'
        c = Context({'lista_selecc': lista_seleccionados, 'seleccionados':
            seleccionados, 'titulo': titulo_pagina, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def personalizar(request):
    """Render the dynamic stylesheet with the user's font size and colour.

    Logged-in users get the values stored on their Usuario profile;
    anonymous visitors get the site defaults.
    """
    if not request.user.is_authenticated():
        letra, color = '14px', '#FCFCFC'
    else:
        perfil = Usuario.objects.get(
            nombre=User.objects.get(username=request.user))
        letra, color = perfil.letra, perfil.color
    plantilla = get_template('change.css')
    contexto = Context({'letra': letra, 'color': color})
    return HttpResponse(plantilla.render(contexto), content_type='text/css')
def usuarios_xml(request, peticion):
    """Serve the parkings selected by user *peticion* as an XML document.

    Builds a <Contenidos> tree with an <infoDataset> header and one
    <Contenido> entry per selected parking.  If the user has no Usuario
    profile (or any record lookup fails) the <Aparcamientos> element is
    simply left empty, keeping the original best-effort behaviour.

    Changes vs. the original: the sixteen duplicated create/append/set
    stanzas are folded into the ``_add_atributo`` helper; a stray empty
    <atributo/> element that was appended just before DATOSDECONTACTO
    (copy-paste residue, it carried no attribute and no text) is removed;
    the bare ``except:`` is narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """

    def _add_atributo(parent, nombre, valor):
        # Append one <atributo nombre="..."> element with a text payload.
        atributo = doc.createElement('atributo')
        parent.appendChild(atributo)
        atributo.setAttribute('nombre', nombre)
        atributo.appendChild(doc.createTextNode(valor))

    user_object = User.objects.get(username=peticion)
    doc = Document()
    cont = doc.createElement('Contenidos')
    doc.appendChild(cont)
    info = doc.createElement('infoDataset')
    cont.appendChild(info)
    nombre = doc.createElement('Nombre')
    info.appendChild(nombre)
    nombre.appendChild(doc.createTextNode(
        'XML de aparcamientos seleccionados por el usuario ' + peticion))
    url = doc.createElement('url')
    info.appendChild(url)
    url.appendChild(doc.createTextNode(
        'http://localhost:1234/' + peticion + '/xml/'))
    aparc = doc.createElement('Aparcamientos')
    cont.appendChild(aparc)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        for i in lista_seleccionados:
            item = doc.createElement('Contenido')
            aparc.appendChild(item)
            a = i.aparcamiento
            _add_atributo(item, 'ID-ENTIDAD', a.entidad)
            _add_atributo(item, 'NOMBRE', a.nombre)
            _add_atributo(item, 'DESCRIPCION', a.descripcion)
            # Accessibility is serialized as "1"/"0" like the source dataset.
            _add_atributo(item, 'ACCESIBILIDAD',
                          str(1 if a.accesibilidad else 0))
            _add_atributo(item, 'CONTENT_URL', a.content_url)
            _add_atributo(item, 'LOCALIZACION', a.localizacion)
            _add_atributo(item, 'CLASE VIAL', a.clase_vial)
            _add_atributo(item, 'TIPO NUM', a.tipo_num)
            _add_atributo(item, 'NUM', str(a.num))
            _add_atributo(item, 'LOCALIDAD', a.localidad)
            _add_atributo(item, 'PROVINCIA', a.provincia)
            _add_atributo(item, 'CODIGO POSTAL', str(a.codigo_postal))
            _add_atributo(item, 'BARRIO', a.barrio)
            _add_atributo(item, 'DISTRITO', a.distrito)
            _add_atributo(item, 'COORDENADA X', str(a.coordenada_x))
            _add_atributo(item, 'COORDENADA Y', str(a.coordenada_y))
            # Contact details live in their own nested element.
            datos = doc.createElement('DATOSDECONTACTO')
            item.appendChild(datos)
            _add_atributo(datos, 'TELEFONO', a.telefono)
            _add_atributo(datos, 'EMAIL', a.email)
    except Exception:
        # Best-effort: a missing profile or a bad record leaves the
        # <Aparcamientos> element empty instead of erroring out.
        pass
    xml = doc.toprettyxml(indent='    ')
    return HttpResponse(xml, content_type='text/xml')
@csrf_exempt
def aparcamientos(request):
    """Listing page for every parking, with a filter-by-district form.

    A POST carries the ``distrito`` field; matching is done against the
    upper-cased district name.  An empty or unknown district shows an
    explanatory message instead of a listing.
    """
    lista = lista_aparcamientos()
    # District filter form (rendered by the template).
    filtrar = '<form action="" method="POST">'
    filtrar += '<br><br><input type="text" name="distrito">'
    filtrar += '<input type="submit" value="Filtrar por distrito">'
    template = get_template('aparcamientos.html')
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    else:
        form_user = 'Para loguearse vaya al botón de Inicio'
    if request.method == 'POST':
        filtro_distrito = request.POST['distrito']
        # District names appear upper-case in the dataset, so normalise
        # the user's input before comparing.
        filtro_distrito = filtro_distrito.upper()
        if filtro_distrito == '':
            lista_filtrada = (
                'No ha introducido ningún filtro, introduzca distrito para filtrar '
                 + lista)
        else:
            aparcamientos_filtrados = Aparcamiento.objects.all()
            Encontrado = False
            lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +
                ' son: ')
            for i in aparcamientos_filtrados:
                if filtro_distrito == i.distrito:
                    Encontrado = True
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista_filtrada += ('<p>' + nombre_aparcamiento +
                        '</p><li><a href=' + url_aparcamiento + '>' +
                        url_aparcamiento + '</a></li>')
            if Encontrado == False:
                # No parking matched: replace the listing with a hint.
                lista_filtrada = ('Introduzca un nuevo distrito. ' +
                    filtro_distrito + ' no es válido')
        c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':
            form_user})
    else:
        c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
@csrf_exempt
def aparcamientos_id(request, recurso):
    """Detail page for the parking whose 'entidad' id is ``recurso``.

    POST either increments the like counter (body key 'Me+Gusta') or stores
    a new comment; then (and on GET) the parking's data and comments are
    rendered, followed by a comment form (logged in) or a like button.
    An unknown id renders an explanatory message instead.
    """
    template = get_template('aparcamientos.html')
    num_megustas = 0
    if request.method == 'POST':
        # The first key of the urlencoded body tells the two forms apart.
        key = request.body.decode('utf-8').split('=')[0]
        print(key)
        if key == 'Me+Gusta':
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1
            aparcamiento.save()
            num_megustas = aparcamiento.contador_megusta
        else:
            coment = request.POST['Comentario']
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_coments = aparcamiento.contador_coments + 1
            aparcamiento.save()
            p = Comentario(aparcamiento=aparcamiento, coment=coment)
            p.save()
    # BUG FIX: form_user used to be assigned only inside the try block below,
    # so an unknown id (ObjectDoesNotExist) reached the Context() call with
    # form_user unbound and raised NameError.  Compute it up front instead.
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += (
            '<br><br><a href="http://localhost:1234/logout" > Logout </a>')
    else:
        form_user = 'Para loguearse vaya al botón de Inicio'
    try:
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        nombre = aparcamiento.nombre
        descripcion = aparcamiento.descripcion
        accesibilidad = aparcamiento.accesibilidad
        localizacion = aparcamiento.localizacion
        via = aparcamiento.clase_vial
        num = aparcamiento.num
        localidad = aparcamiento.localidad
        provincia = aparcamiento.provincia
        codigo_postal = aparcamiento.codigo_postal
        barrio = aparcamiento.barrio
        distrito = aparcamiento.distrito
        coordenada_x = aparcamiento.coordenada_x
        coordenada_y = aparcamiento.coordenada_y
        telefono = aparcamiento.telefono
        email = aparcamiento.email
        # Blank contact fields are shown as "not available".
        if telefono == '':
            telefono = 'No disponible'
        if email == '':
            email = 'No disponible'
        if accesibilidad == 1:
            acces = 'Libre'
        else:
            acces = 'Ocupado'
        lista_aparcamientos = Aparcamiento.objects.all()
        list_coments = ''
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        num_megustas = aparcamiento.contador_megusta
        for i in lista_aparcamientos:
            if i.entidad == recurso:
                comentarios = Comentario.objects.filter(aparcamiento=i)
                if len(comentarios) != 0:
                    list_coments = '<li><p>COMENTARIOS</p><ol>'
                    for j in comentarios:
                        list_coments += '<li>' + j.coment + '<br>'
                Response = (
                    '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +
                    recurso + '</br></p>')
                Response += ('<a href=' + i.content_url + '>' + i.nombre +
                             '</a><br>')
                Response += 'Descripción: ' + descripcion + '</br>'
                Response += 'Accesibilidad: ' + acces + '</br>'
                Response += ('Localización: ' + via + ' ' + localizacion +
                             ', nº ' + str(num))
                Response += ' ' + localidad + ' (' + str(codigo_postal
                    ) + ')</br>'
                Response += ('Ubicación: ' + barrio + ' ' + distrito +
                             ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(
                    coordenada_y) + '<br><br>')
                Response += 'INFORMACIÓN DE CONTACTO </br>'
                Response += 'Teléfono: ' + telefono + '</br>'
                Response += ('Email: ' + email + '</br>' + list_coments +
                             '</ol>')
                if num_megustas != 0:
                    Response += '</br><li>Numero de me gustas es: ' + str(
                        num_megustas) + '<br>'
                else:
                    Response += (
                        '</br><li>Se el primero en indicar que te gusta la página<br>'
                        )
                if request.user.is_authenticated():
                    # Logged-in visitors get a comment form.
                    formulario = '<form action="" method="POST">'
                    formulario += (
                        '<br>Puede introducir un comentario si lo desea ' + str(
                        request.user) + '<br><input type="text" name="Comentario">')
                    formulario += '<input type="submit" value="Comentar"></form>'
                    Response += formulario
                else:
                    # Anonymous visitors get the +1 ("like") button instead.
                    megusta = ''
                    megusta += '<br> Indica que te gusta este aparcamiento</br>'
                    megusta += '<form action="" method="POST">'
                    megusta += (
                        '<button type="submit" name="Me Gusta" value="Me Gusta"> +1 </button></form>'
                        )
                    Response += megusta
    except ObjectDoesNotExist:
        Response = 'Este id no se corresponde con ningún aparcamiento'
    c = Context({'lista': Response, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def about(request):
    """Render the static help page describing every part of the application."""
    # Each entry is one paragraph of the help text; concatenated in order.
    secciones = (
        'DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>',
        '------------------------------------ Página principal ---------------------------------------------------',
        '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>',
        "<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>",
        '<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>',
        '<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>',
        '------------------------------------ Página con los aparcamientos ---------------------------------------------------',
        "<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>",
        "<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>",
        '<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>',
        '------------------------------------ Interfaz pública de usuario ---------------------------------------------------',
        '<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>',
        '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>',
        '------------------------------------ Interfaz privada de usuario ---------------------------------------------------',
        '<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>',
        "<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>",
        '<li> Formulario para cambiar el título de su página personal.</li>',
        '<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>',
        "<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>",
        '------------------------------------ Pie de pagina ---------------------------------------------------',
        '<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>',
        '<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>',
        '------------------------------------ Página XML de un usuario ---------------------------------------------------',
        "<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>",
    )
    cuerpo = ''.join(secciones)
    template = get_template('about.html')
    c = Context({'lista': cuerpo})
    return HttpResponse(template.render(c))
<|reserved_special_token_1|>
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponse
from .models import *
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import login_required
from django.template.loader import get_template
from django.template import Context
from django.views.decorators.csrf import csrf_exempt
from django.template.context_processors import csrf
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import xml.etree.ElementTree as etree
from xml.dom.minidom import Document, parse
import xml.dom.minidom as dom
import datetime
import sys
from .parser import get_data
from django.http import QueryDict
import urllib
# Create your views here.
@csrf_exempt
def login_form(request):
    """Return the HTML login form shown on the main page (``request`` unused)."""
    campos = ('<form action="login" method="POST">'
              'Nombre<br><input type="text" name="Usuario"><br>'
              'Contraseña<br><input type="password" name="Password"><br>'
              '<br><input type="submit" value="Entrar"></form>')
    return campos
@csrf_exempt
def loginuser(request):
    """Handle the login POST: authenticate and redirect to the user's page.

    On bad credentials the failure template is rendered with an error
    message instead of redirecting.
    """
    user = authenticate(username=request.POST['Usuario'],
                        password=request.POST['Password'])
    if user is None:
        # Credentials rejected: show the failure page.
        template = get_template("fail.html")
        c = Context({'Error':
                     "Por favor, introduzca un usuario y contraseña válidos"})
        return HttpResponse(template.render(c))
    login(request, user)
    return redirect('/' + str(user))
def lista_megustas():
    """Build the HTML block listing the five parkings with the most likes.

    Parkings whose like counter is still zero are skipped; when none has
    been liked yet a placeholder message is shown instead.
    """
    top_cinco = Aparcamiento.objects.all().order_by("-contador_megusta")[:5]
    piezas = ["LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>"]
    alguno = False
    for parking in top_cinco:
        if parking.contador_megusta == 0:
            continue
        alguno = True
        piezas.append("<li><a href=" + parking.content_url + ">" +
                      parking.nombre + "<br></a>")
        piezas.append("Dirección: " + parking.clase_vial + " " +
                      parking.localizacion + ", nº " + str(parking.num))
        piezas.append("<br><a href=http://localhost:1234/aparcamientos/" +
                      parking.entidad + ">" + "Más información<br></a><br>")
    if not alguno:
        piezas.append("Aún no se han registrado comentarios para ningún aparcamiento")
    piezas.append("</br></br>")
    return "".join(piezas)
def paginas_personales():
    """Build the HTML list of links to every registered user's public page.

    Returns one anchor per ``User``; when a user has no ``Usuario`` row
    yet (never customised their page) a default title is used.
    """
    Lista = "PÁGINAS DE USUARIOS<br><br>"
    usuarios = User.objects.all()
    for i in usuarios:
        try:
            # NOTE(review): this looks up Usuario by nombre=i.id while other
            # views in this file use nombre=<User instance>; presumably both
            # coerce to the user's pk — TODO confirm against the Usuario model.
            pagina = Usuario.objects.get(nombre=i.id).titulo_pagina
        except ObjectDoesNotExist:
            # No custom title stored: fall back to a generated one.
            pagina = "Página de " + i.username
        Lista += "<a href=http://localhost:1234/" + i.username + ">" + pagina + "</a> Usuario: " + i.username + "<br>"
    return Lista
def lista_aparcamientos():
    """Return one <li> entry per registered parking, linking to its detail page."""
    return ''.join(
        '<li><p>' + parking.nombre + '<a href="' + parking.entidad +
        '"> --> Más información</a></p></li>'
        for parking in Aparcamiento.objects.all()
    )
def aparcamientos_seleccionados(user,request):
    """Return (html, page) for the parkings selected by ``user``.

    The selections are paginated five per page; the page number comes from
    the ``page`` GET parameter.  When the user has no ``Usuario`` row (has
    never selected anything) a placeholder message and an empty string are
    returned instead of a paginator page.
    """
    user_object = User.objects.get(username=user)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        paginator = Paginator(lista_seleccionados,5)
        page = request.GET.get('page')
        try:
            seleccionados = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            seleccionados = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results.
            seleccionados = paginator.page(paginator.num_pages)
        lista = "Listado de aparcamientos seleccionados por " + user + "<br>"
        # One entry per selection on the current page: date, link, address.
        for i in seleccionados:
            lista += "<br><li>Fecha de selección: " + str(i.fecha_seleccion)
            lista += "<br><a href=" + i.aparcamiento.content_url + ">" + i.aparcamiento.nombre + "<br></a>"
            lista += "Dirección: " + i.aparcamiento.clase_vial + " " + i.aparcamiento.localizacion + ", nº " + str(i.aparcamiento.num)
            lista += "<br><a href=http://localhost:1234/aparcamientos/" + i.aparcamiento.entidad + ">" + "Más información</a><br>"
    except ObjectDoesNotExist:
        # No Usuario row for this user: nothing has ever been selected.
        lista = "El usuario aún no ha seleccionado ningún aparcamiento"
        seleccionados = ""
    return lista,seleccionados
def accesibles(value):
    """Return the 'Accesibles' toggle form; ``value`` is the flag POSTed back."""
    boton = ('<button type="submit" name="Accesible" value="' + str(value) +
             '"> Accesibles</button></form>')
    return '<form action="" method="POST">' + boton
@csrf_exempt
def pagina_principal(request):
    """Main page: login form, most-liked parkings, user pages, Accesibles toggle.

    A POST whose first body key is 'Accesible' toggles between the list of
    currently-accessible parkings (value '1') and the full catalogue.  On a
    plain GET, the database is bootstrapped from the upstream feed the first
    time (when no Aparcamiento rows exist yet).
    """
    formulario = login_form(request)
    list_megustas = lista_megustas()
    users = paginas_personales()
    value = 1
    accesible = accesibles(value)
    template = get_template("index.html")
    if request.user.is_authenticated():
        # Replace the login form with a greeting plus logout link.
        username = str(request.user)
        formulario = 'Bienvenido ' + username
        formulario += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
    if request.method == 'POST':
        # First key of the urlencoded body identifies the submitted form.
        key = request.body.decode("utf-8").split('=')[0]
        # NOTE(review): if a POST arrives whose first key is not 'Accesible',
        # 'c' is never bound and template.render(c) below raises — confirm
        # whether any other form can POST to this URL.
        if key == 'Accesible':
            value = request.POST['Accesible']
            if value == '1':
                # Show only the accessible parkings; next click shows all.
                lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                value = 0
                for i in lista_accesibles:
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista += "<li><p>" + nombre_aparcamiento + "</p><a href=" + url_aparcamiento + ">" + url_aparcamiento + "</a></li>"
            else:
                # Toggle back: list every registered parking.
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                aparcamientos = Aparcamiento.objects.all()
                for aparcamiento in aparcamientos:
                    nombre_aparcamiento = aparcamiento.nombre
                    url_aparcamiento = aparcamiento.entidad
                    lista += '<li><p>' + nombre_aparcamiento + '. URL del aparcamiento: ' + '<a href="aparcamientos/' + url_aparcamiento + '"> ⇾ Más información</a></br></p>'
                value = 1
            accesible = accesibles(value)
            c = Context({'login': formulario, 'list_users':lista, 'accesible': accesible})
    else:
        init = Aparcamiento.objects.all()
        if len(init) == 0:
            # Empty database: load the parking data from the source feed.
            get_data()
        c = Context({'login': formulario, 'list':list_megustas, 'list_users':users, 'accesible': accesible})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def mylogout(request):
    """Log the current user out and send them back to the main page."""
    logout(request)
    return redirect("/")
@csrf_exempt
def usuarios(request, peticion):
    """Personal page of the user ``peticion``.

    POST requests are told apart by the first key of the urlencoded body:
      * 'Titulo'      - change the personal page title (creating the
                        Usuario row on demand),
      * 'Seleccionar' - add a parking to the user's selection (duplicates
                        are skipped),
      * 'Letra'       - store the font size/colour used by the stylesheet.
    The private template is rendered only when the authenticated visitor
    views their own page; everyone else gets the public one.
    """
    # Form to change the personal page title.
    formulario = '<form action="" method="POST">'
    formulario += '<br>Introduzca un título nuevo a su página personal<br><input type="text" name="Titulo">'
    formulario += '<input type="submit" value=" Actualizar"></form>'
    # Form to change font size and colour.
    css = '<form action="" method="POST">'
    css += 'Modifique el tamaño de letra<br><input type="text" name="Letra">'
    css += '<br><br>Modifique el color de letra <input type="color" name="Color"><br>'
    css += '<br><input type="submit" value="Modificar"></form>'
    # Full catalogue, each entry with its own 'Seleccionar' button.
    aparcamientos = Aparcamiento.objects.all()
    lista = "<br>LISTADO DE APARCAMIENTOS<br><br>"
    for aparcamiento in aparcamientos:
        nombre_aparcamiento = aparcamiento.nombre
        lista += nombre_aparcamiento
        lista += '<form action="" method="POST">'
        lista += '<button type="submit" name="Seleccionar" value="' + nombre_aparcamiento + '">Seleccionar</button><br></form>'
    user_object = User.objects.get(username=peticion)
    if request.method == 'POST':
        key = request.body.decode("utf-8").split('=')[0]
        if key == "Titulo":
            titulo = request.POST['Titulo']
            try:
                user = Usuario.objects.get(nombre=user_object)
                user.titulo_pagina = titulo
                user.save()
            except ObjectDoesNotExist:
                # First customisation: create the Usuario row on demand.
                p = Usuario(nombre=user_object, titulo_pagina=titulo)
                p.save()
        elif key == "Seleccionar":
            nombre_aparcamiento = request.POST['Seleccionar']
            today = datetime.datetime.today()
            try:
                selector = Usuario.objects.get(nombre=user_object)
                aparcamiento = Aparcamiento.objects.get(nombre=nombre_aparcamiento)
            except ObjectDoesNotExist:
                # BUG FIX: was a bare 'except:' that swallowed every error;
                # only the missing-row case is expected here.
                # NOTE(review): if it was the Aparcamiento lookup that failed,
                # 'aparcamiento' stays unbound and the save below raises
                # NameError — confirm whether that can happen in practice.
                p = Usuario(nombre=user_object)
                p.save()
                selector = Usuario.objects.get(nombre=user_object)
            # Skip the save when this parking was already selected.
            Check = False
            lista_usuario = Seleccionados.objects.filter(selector=selector)
            for i in lista_usuario:
                if nombre_aparcamiento == i.aparcamiento.nombre:
                    Check = True
            if Check == False:
                p = Seleccionados(aparcamiento=aparcamiento, selector=selector, fecha_seleccion=today)
                p.save()
        elif key == "Letra":
            letra = request.POST['Letra']
            color = request.POST['Color']
            try:
                user = Usuario.objects.get(nombre=user_object)
            except ObjectDoesNotExist:
                # BUG FIX: narrowed from a bare 'except:'; creates the
                # Usuario row the first time the style is customised.
                p = Usuario(nombre=user_object)
                p.save()
                user = Usuario.objects.get(nombre=user_object)
            if letra == "":
                letra = "15"  # default font size when the field is left empty
            user.letra = letra
            user.color = color
            user.save()
    lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion, request)
    if request.user.is_authenticated():
        username = str(request.user)
        if peticion != username:  # not my page: only the public part is visible
            template = get_template("publicuser.html")
            titulo_pagina = "Página pública de " + peticion + "<br><br>"
            form_user = 'Bienvenido ' + username
            form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
            c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})
        else:  # my own page: render the private view with the edit forms
            template = get_template("privateuser.html")
            try:
                titulo_pagina = Usuario.objects.get(nombre=user_object).titulo_pagina
            except ObjectDoesNotExist:
                titulo_pagina = "Página personal de " + str(request.user) + "<br><br>"
            c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'lista': lista, 'form': formulario, 'css':css, 'titulo': titulo_pagina})
    else:
        template = get_template("publicuser.html")
        titulo_pagina = "Página pública de " + peticion + "<br><br>"
        form_user = 'Para loguearse vaya al botón de Inicio'
        c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def personalizar(request):
    """Serve the per-user stylesheet (font size and colour).

    Logged-in users get the values stored on their ``Usuario`` row;
    anonymous visitors — and users who never customised anything — get
    the defaults.
    """
    letra = "14px"
    color = "#FCFCFC"
    if request.user.is_authenticated():
        user_object = User.objects.get(username=request.user)
        try:
            user = Usuario.objects.get(nombre=user_object)
            letra = user.letra
            color = user.color
        except ObjectDoesNotExist:
            # BUG FIX: a logged-in user with no Usuario row (never
            # customised their style) used to crash this view with an
            # uncaught ObjectDoesNotExist; fall back to the defaults.
            pass
    css = get_template("change.css")
    c = Context({'letra': letra, 'color': color})
    renderizado = css.render(c)
    return HttpResponse(renderizado, content_type="text/css")
def usuarios_xml(request, peticion):
    """Serve an XML document with the parkings selected by user ``peticion``.

    The document always contains the <infoDataset> header; the
    <Aparcamientos> element holds one <Contenido> per selection, or stays
    empty when the user has no ``Usuario`` row (never selected anything).
    """
    user_object = User.objects.get(username=peticion)
    doc = Document()

    def add_atributo(parent, etiqueta, valor):
        # Append one <atributo nombre="etiqueta">valor</atributo> child.
        atributo = doc.createElement("atributo")
        parent.appendChild(atributo)
        atributo.setAttribute("nombre", etiqueta)
        atributo.appendChild(doc.createTextNode(valor))

    cont = doc.createElement("Contenidos")
    doc.appendChild(cont)
    info = doc.createElement("infoDataset")
    cont.appendChild(info)
    nombre = doc.createElement("Nombre")
    info.appendChild(nombre)
    nombre.appendChild(doc.createTextNode(
        "XML de aparcamientos seleccionados por el usuario " + peticion))
    url = doc.createElement("url")
    info.appendChild(url)
    url.appendChild(doc.createTextNode(
        "http://localhost:1234/" + peticion + "/xml/"))
    aparc = doc.createElement("Aparcamientos")
    cont.appendChild(aparc)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        for i in lista_seleccionados:
            item = doc.createElement("Contenido")
            aparc.appendChild(item)
            a = i.aparcamiento
            # Serialise the boolean accesibilidad flag as 1/0.
            if a.accesibilidad == True:
                acces = 1
            else:
                acces = 0
            add_atributo(item, "ID-ENTIDAD", a.entidad)
            add_atributo(item, "NOMBRE", a.nombre)
            add_atributo(item, "DESCRIPCION", a.descripcion)
            add_atributo(item, "ACCESIBILIDAD", str(acces))
            add_atributo(item, "CONTENT_URL", a.content_url)
            add_atributo(item, "LOCALIZACION", a.localizacion)
            add_atributo(item, "CLASE VIAL", a.clase_vial)
            add_atributo(item, "TIPO NUM", a.tipo_num)
            add_atributo(item, "NUM", str(a.num))
            add_atributo(item, "LOCALIDAD", a.localidad)
            add_atributo(item, "PROVINCIA", a.provincia)
            add_atributo(item, "CODIGO POSTAL", str(a.codigo_postal))
            add_atributo(item, "BARRIO", a.barrio)
            add_atributo(item, "DISTRITO", a.distrito)
            add_atributo(item, "COORDENADA X", str(a.coordenada_x))
            add_atributo(item, "COORDENADA Y", str(a.coordenada_y))
            # BUG FIX: the original appended one extra, empty <atributo/>
            # (createElement + appendChild with no name or text) right here.
            datos = doc.createElement("DATOSDECONTACTO")
            item.appendChild(datos)
            add_atributo(datos, "TELEFONO", a.telefono)
            add_atributo(datos, "EMAIL", a.email)
    except ObjectDoesNotExist:
        # BUG FIX: was a bare 'except: print("")' that silently swallowed
        # every error; only the missing-Usuario case is expected, and then
        # the empty skeleton document is served.
        pass
    xml = doc.toprettyxml(indent=" ")
    return HttpResponse(xml, content_type="text/xml")
@csrf_exempt
def aparcamientos(request):
    """Render the full parking list; a POSTed 'distrito' filters by district.

    The district is upper-cased before comparison; an empty value or a
    district that matches nothing yields an explanatory message instead of
    the filtered list.
    """
    lista = lista_aparcamientos()
    filtrar = '<form action="" method="POST">'
    filtrar += '<br><br><input type="text" name="distrito">'
    filtrar += '<input type="submit" value="Filtrar por distrito">'
    template = get_template("aparcamientos.html")
    if request.user.is_authenticated():
        # Greeting plus logout link for logged-in visitors.
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
    else:
        form_user = "Para loguearse vaya al botón de Inicio"
    if request.method == "POST":
        filtro_distrito = request.POST['distrito']
        filtro_distrito = filtro_distrito.upper()
        if filtro_distrito == '':
            lista_filtrada = "No ha introducido ningún filtro, introduzca distrito para filtrar " + lista
        else:
            aparcamientos_filtrados = Aparcamiento.objects.all()
            Encontrado = False
            lista_filtrada = "Los aparcamientos en el " + filtro_distrito + " son: "
            for i in aparcamientos_filtrados:
                if filtro_distrito == i.distrito:
                    Encontrado = True
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista_filtrada += "<p>" + nombre_aparcamiento + "</p><li><a href=" + url_aparcamiento + ">" + url_aparcamiento + "</a></li>"
            if Encontrado == False: #The entered district is not valid: the loop above never matched
                lista_filtrada = "Introduzca un nuevo distrito. " + filtro_distrito + " no es válido"
        c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':form_user})
    else:
        c = Context({'distrito': filtrar, 'lista': lista, 'login':form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
@csrf_exempt
def aparcamientos_id(request, recurso):
    """Detail page for the parking whose 'entidad' id is ``recurso``.

    POST either increments the like counter (body key 'Me+Gusta') or stores
    a new comment; then (and on GET) the parking's data and comments are
    rendered, followed by a comment form (logged in) or a like button.
    An unknown id renders an explanatory message instead.
    """
    template = get_template("aparcamientos.html")
    num_megustas = 0
    if request.method == 'POST':
        # The first key of the urlencoded body tells the two forms apart.
        key = request.body.decode("utf-8").split('=')[0]
        print(key)
        if key == 'Me+Gusta':
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1
            aparcamiento.save()
            num_megustas = aparcamiento.contador_megusta
        else:
            coment = request.POST['Comentario']
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_coments = aparcamiento.contador_coments + 1
            aparcamiento.save()
            p = Comentario(aparcamiento=aparcamiento, coment=coment)
            p.save()
    # BUG FIX: form_user used to be assigned only inside the try block below,
    # so an unknown id (ObjectDoesNotExist) reached the Context() call with
    # form_user unbound and raised NameError.  Compute it up front instead.
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
    else:
        form_user = "Para loguearse vaya al botón de Inicio"
    try:
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        nombre = aparcamiento.nombre
        descripcion = aparcamiento.descripcion
        accesibilidad = aparcamiento.accesibilidad
        localizacion = aparcamiento.localizacion
        via = aparcamiento.clase_vial
        num = aparcamiento.num
        localidad = aparcamiento.localidad
        provincia = aparcamiento.provincia
        codigo_postal = aparcamiento.codigo_postal
        barrio = aparcamiento.barrio
        distrito = aparcamiento.distrito
        coordenada_x = aparcamiento.coordenada_x
        coordenada_y = aparcamiento.coordenada_y
        telefono = aparcamiento.telefono
        email = aparcamiento.email
        # Blank contact fields are shown as "not available".
        if telefono == '':
            telefono = "No disponible"
        if email == '':
            email = "No disponible"
        if accesibilidad == 1:
            acces = "Libre"
        else:
            acces = "Ocupado"
        lista_aparcamientos = Aparcamiento.objects.all()
        list_coments = ""
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        num_megustas = aparcamiento.contador_megusta
        for i in lista_aparcamientos:
            if i.entidad == recurso:
                comentarios = Comentario.objects.filter(aparcamiento=i)
                if len(comentarios) != 0:
                    list_coments = "<li><p>COMENTARIOS</p><ol>"
                    for j in comentarios:
                        list_coments += "<li>" + j.coment + "<br>"
                Response = "<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: " + recurso + "</br></p>"
                Response += "<a href=" + i.content_url + ">" + i.nombre + "</a><br>"
                Response += "Descripción: " + descripcion + "</br>"
                Response += "Accesibilidad: " + acces + "</br>"
                Response += "Localización: " + via + " " + localizacion + ", nº " + str(num)
                Response += " " + localidad + " (" + str(codigo_postal) + ")</br>"
                Response += "Ubicación: " + barrio + " " + distrito + " Coordenadas: " + str(coordenada_x) + " , " + str(coordenada_y) + "<br><br>"
                Response += "INFORMACIÓN DE CONTACTO </br>"
                Response += "Teléfono: " + telefono + "</br>"
                Response += "Email: " + email + "</br>" + list_coments + "</ol>"
                if num_megustas != 0:
                    Response += "</br><li>Numero de me gustas es: " + str(num_megustas) + "<br>"
                else:
                    Response += "</br><li>Se el primero en indicar que te gusta la página<br>"
                if request.user.is_authenticated():
                    # Logged-in visitors get a comment form.
                    formulario = '<form action="" method="POST">'
                    formulario += '<br>Puede introducir un comentario si lo desea ' + str(request.user) + '<br><input type="text" name="Comentario">'
                    formulario += '<input type="submit" value="Comentar"></form>'
                    Response += formulario
                else:
                    # Anonymous visitors get the +1 ("like") button instead.
                    megusta = ''
                    megusta += '<br> Indica que te gusta este aparcamiento</br>'
                    megusta += '<form action="" method="POST">'
                    megusta += '<button type="submit" name="Me Gusta" value="Me Gusta"> +1 </button></form>'
                    Response += megusta
    except ObjectDoesNotExist:
        Response = "Este id no se corresponde con ningún aparcamiento"
    c = Context({'lista': Response, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def about(request):
    """Render the static 'About' page.

    Builds the full help text of the application (one HTML fragment per
    feature section) and renders it through the ``about.html`` template
    under the context key ``lista``.
    """
    # Every fragment below is emitted verbatim; joining the tuple produces
    # exactly the same HTML body as the original chained concatenation.
    secciones = (
        "DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>",
        "------------------------------------ Página principal ---------------------------------------------------",
        "<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>",
        "<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>",
        "<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>",
        "<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>",
        "------------------------------------ Página con los aparcamientos ---------------------------------------------------",
        "<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>",
        "<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>",
        "<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>",
        "------------------------------------ Interfaz pública de usuario ---------------------------------------------------",
        "<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>",
        "<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>",
        "------------------------------------ Interfaz privada de usuario ---------------------------------------------------",
        "<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>",
        "<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>",
        "<li> Formulario para cambiar el título de su página personal.</li>",
        "<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>",
        "<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>",
        "------------------------------------ Pie de pagina ---------------------------------------------------",
        "<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>",
        "<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>",
        "------------------------------------ Página XML de un usuario ---------------------------------------------------",
        "<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>",
    )
    plantilla = get_template("about.html")
    contexto = Context({'lista': "".join(secciones)})
    return HttpResponse(plantilla.render(contexto))
|
flexible
|
{
"blob_id": "e982fd5bed540b836fd4e2caaec033d8cbfb0e4f",
"index": 9854,
"step-1": "<mask token>\n\n\n@csrf_exempt\ndef login_form(request):\n formulario = '<form action=\"login\" method=\"POST\">'\n formulario += 'Nombre<br><input type=\"text\" name=\"Usuario\"><br>'\n formulario += 'Contraseña<br><input type=\"password\" name=\"Password\"><br>'\n formulario += '<br><input type=\"submit\" value=\"Entrar\"></form>'\n return formulario\n\n\n<mask token>\n\n\ndef lista_megustas():\n lista_todos = Aparcamiento.objects.all()\n lista_ordenada = lista_todos.order_by('-contador_megusta')[:5]\n Response = 'LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>'\n Existe = False\n for i in lista_ordenada:\n megustas = i.contador_megusta\n if megustas != 0:\n Response += ('<li><a href=' + i.content_url + '>' + i.nombre +\n '<br></a>')\n Response += ('Dirección: ' + i.clase_vial + ' ' + i.\n localizacion + ', nº ' + str(i.num))\n Response += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.entidad + '>' + 'Más información<br></a><br>')\n Existe = True\n if Existe == False:\n Response += (\n 'Aún no se han registrado comentarios para ningún aparcamiento')\n Response += '</br></br>'\n return Response\n\n\n<mask token>\n\n\ndef lista_aparcamientos():\n lista = ''\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento + '<a href=\"' +\n url_aparcamiento + '\">\\t--> Más información</a></p></li>')\n return lista\n\n\ndef aparcamientos_seleccionados(user, request):\n user_object = User.objects.get(username=user)\n try:\n usuario = Usuario.objects.get(nombre=user_object)\n lista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n paginator = Paginator(lista_seleccionados, 5)\n page = request.GET.get('page')\n try:\n seleccionados = paginator.page(page)\n except PageNotAnInteger:\n seleccionados = paginator.page(1)\n except EmptyPage:\n seleccionados = 
paginator.page(paginator.num_pages)\n lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'\n for i in seleccionados:\n lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)\n lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i\n .aparcamiento.nombre + '<br></a>')\n lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.\n aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))\n lista += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.aparcamiento.entidad + '>' + 'Más información</a><br>')\n except ObjectDoesNotExist:\n lista = 'El usuario aún no ha seleccionado ningún aparcamiento'\n seleccionados = ''\n return lista, seleccionados\n\n\ndef accesibles(value):\n accesibles = '<form action=\"\" method=\"POST\">'\n accesibles += '<button type=\"submit\" name=\"Accesible\" value=\"' + str(value\n ) + '\"> Accesibles</button></form>'\n return accesibles\n\n\n@csrf_exempt\ndef pagina_principal(request):\n formulario = login_form(request)\n list_megustas = lista_megustas()\n users = paginas_personales()\n value = 1\n accesible = accesibles(value)\n template = get_template('index.html')\n if request.user.is_authenticated():\n username = str(request.user)\n formulario = 'Bienvenido ' + username\n formulario += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Accesible':\n value = request.POST['Accesible']\n if value == '1':\n lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n value = 0\n for i in lista_accesibles:\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista += ('<li><p>' + nombre_aparcamiento +\n '</p><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n else:\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n aparcamientos = Aparcamiento.objects.all()\n for 
aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento +\n '. URL del aparcamiento: ' +\n '<a href=\"aparcamientos/' + url_aparcamiento +\n '\">\\t⇾ Más información</a></br></p>')\n value = 1\n accesible = accesibles(value)\n c = Context({'login': formulario, 'list_users': lista,\n 'accesible': accesible})\n else:\n init = Aparcamiento.objects.all()\n if len(init) == 0:\n get_data()\n c = Context({'login': formulario, 'list': list_megustas,\n 'list_users': users, 'accesible': accesible})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef aparcamientos(request):\n lista = lista_aparcamientos()\n filtrar = '<form action=\"\" method=\"POST\">'\n filtrar += '<br><br><input type=\"text\" name=\"distrito\">'\n filtrar += '<input type=\"submit\" value=\"Filtrar por distrito\">'\n template = get_template('aparcamientos.html')\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n if request.method == 'POST':\n filtro_distrito = request.POST['distrito']\n filtro_distrito = filtro_distrito.upper()\n if filtro_distrito == '':\n lista_filtrada = (\n 'No ha introducido ningún filtro, introduzca distrito para filtrar '\n + lista)\n else:\n aparcamientos_filtrados = Aparcamiento.objects.all()\n Encontrado = False\n lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +\n ' son: ')\n for i in aparcamientos_filtrados:\n if filtro_distrito == i.distrito:\n Encontrado = True\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista_filtrada += ('<p>' + nombre_aparcamiento +\n '</p><li><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n if Encontrado == False:\n 
lista_filtrada = ('Introduzca un nuevo distrito. ' +\n filtro_distrito + ' no es válido')\n c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':\n form_user})\n else:\n c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n@csrf_exempt\ndef aparcamientos_id(request, recurso):\n template = get_template('aparcamientos.html')\n num_megustas = 0\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n print(key)\n if key == 'Me+Gusta':\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1\n aparcamiento.save()\n num_megustas = aparcamiento.contador_megusta\n else:\n coment = request.POST['Comentario']\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_coments = aparcamiento.contador_coments + 1\n aparcamiento.save()\n p = Comentario(aparcamiento=aparcamiento, coment=coment)\n p.save()\n try:\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n nombre = aparcamiento.nombre\n descripcion = aparcamiento.descripcion\n accesibilidad = aparcamiento.accesibilidad\n localizacion = aparcamiento.localizacion\n via = aparcamiento.clase_vial\n num = aparcamiento.num\n localidad = aparcamiento.localidad\n provincia = aparcamiento.provincia\n codigo_postal = aparcamiento.codigo_postal\n barrio = aparcamiento.barrio\n distrito = aparcamiento.distrito\n coordenada_x = aparcamiento.coordenada_x\n coordenada_y = aparcamiento.coordenada_y\n telefono = aparcamiento.telefono\n email = aparcamiento.email\n if telefono == '':\n telefono = 'No disponible'\n if email == '':\n email = 'No disponible'\n if accesibilidad == 1:\n acces = 'Libre'\n else:\n acces = 'Ocupado'\n lista_aparcamientos = Aparcamiento.objects.all()\n list_coments = ''\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n num_megustas = aparcamiento.contador_megusta\n for i 
in lista_aparcamientos:\n if i.entidad == recurso:\n comentarios = Comentario.objects.filter(aparcamiento=i)\n if len(comentarios) != 0:\n list_coments = '<li><p>COMENTARIOS</p><ol>'\n for j in comentarios:\n list_coments += '<li>' + j.coment + '<br>'\n Response = (\n '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +\n recurso + '</br></p>')\n Response += ('<a href=' + i.content_url + '>' + i.nombre +\n '</a><br>')\n Response += 'Descripción: ' + descripcion + '</br>'\n Response += 'Accesibilidad: ' + acces + '</br>'\n Response += ('Localización: ' + via + ' ' + localizacion +\n ', nº ' + str(num))\n Response += ' ' + localidad + ' (' + str(codigo_postal\n ) + ')</br>'\n Response += ('Ubicación: ' + barrio + ' ' + distrito +\n ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(\n coordenada_y) + '<br><br>')\n Response += 'INFORMACIÓN DE CONTACTO </br>'\n Response += 'Teléfono: ' + telefono + '</br>'\n Response += ('Email: ' + email + '</br>' + list_coments +\n '</ol>')\n if num_megustas != 0:\n Response += '</br><li>Numero de me gustas es: ' + str(\n num_megustas) + '<br>'\n else:\n Response += (\n '</br><li>Se el primero en indicar que te gusta la página<br>'\n )\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Puede introducir un comentario si lo desea ' + str(\n request.user) + '<br><input type=\"text\" name=\"Comentario\">')\n formulario += '<input type=\"submit\" value=\"Comentar\"></form>'\n Response += formulario\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n megusta = ''\n megusta += '<br> Indica que te gusta este aparcamiento</br>'\n megusta += '<form action=\"\" method=\"POST\">'\n megusta += (\n '<button type=\"submit\" name=\"Me Gusta\" value=\"Me Gusta\"> +1 </button></form>'\n )\n Response += megusta\n except 
ObjectDoesNotExist:\n Response = 'Este id no se corresponde con ningún aparcamiento'\n c = Context({'lista': Response, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@csrf_exempt\ndef login_form(request):\n formulario = '<form action=\"login\" method=\"POST\">'\n formulario += 'Nombre<br><input type=\"text\" name=\"Usuario\"><br>'\n formulario += 'Contraseña<br><input type=\"password\" name=\"Password\"><br>'\n formulario += '<br><input type=\"submit\" value=\"Entrar\"></form>'\n return formulario\n\n\n@csrf_exempt\ndef loginuser(request):\n username = request.POST['Usuario']\n password = request.POST['Password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n direcc = '/' + str(user)\n return redirect(direcc)\n else:\n Error = 'Por favor, introduzca un usuario y contraseña válidos'\n template = get_template('fail.html')\n c = Context({'Error': Error})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef lista_megustas():\n lista_todos = Aparcamiento.objects.all()\n lista_ordenada = lista_todos.order_by('-contador_megusta')[:5]\n Response = 'LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>'\n Existe = False\n for i in lista_ordenada:\n megustas = i.contador_megusta\n if megustas != 0:\n Response += ('<li><a href=' + i.content_url + '>' + i.nombre +\n '<br></a>')\n Response += ('Dirección: ' + i.clase_vial + ' ' + i.\n localizacion + ', nº ' + str(i.num))\n Response += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.entidad + '>' + 'Más información<br></a><br>')\n Existe = True\n if Existe == False:\n Response += (\n 'Aún no se han registrado comentarios para ningún aparcamiento')\n Response += '</br></br>'\n return Response\n\n\n<mask token>\n\n\ndef lista_aparcamientos():\n lista = ''\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento + '<a href=\"' +\n url_aparcamiento + '\">\\t--> Más información</a></p></li>')\n return lista\n\n\ndef 
aparcamientos_seleccionados(user, request):\n user_object = User.objects.get(username=user)\n try:\n usuario = Usuario.objects.get(nombre=user_object)\n lista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n paginator = Paginator(lista_seleccionados, 5)\n page = request.GET.get('page')\n try:\n seleccionados = paginator.page(page)\n except PageNotAnInteger:\n seleccionados = paginator.page(1)\n except EmptyPage:\n seleccionados = paginator.page(paginator.num_pages)\n lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'\n for i in seleccionados:\n lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)\n lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i\n .aparcamiento.nombre + '<br></a>')\n lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.\n aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))\n lista += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.aparcamiento.entidad + '>' + 'Más información</a><br>')\n except ObjectDoesNotExist:\n lista = 'El usuario aún no ha seleccionado ningún aparcamiento'\n seleccionados = ''\n return lista, seleccionados\n\n\ndef accesibles(value):\n accesibles = '<form action=\"\" method=\"POST\">'\n accesibles += '<button type=\"submit\" name=\"Accesible\" value=\"' + str(value\n ) + '\"> Accesibles</button></form>'\n return accesibles\n\n\n@csrf_exempt\ndef pagina_principal(request):\n formulario = login_form(request)\n list_megustas = lista_megustas()\n users = paginas_personales()\n value = 1\n accesible = accesibles(value)\n template = get_template('index.html')\n if request.user.is_authenticated():\n username = str(request.user)\n formulario = 'Bienvenido ' + username\n formulario += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Accesible':\n value = request.POST['Accesible']\n if value == '1':\n lista_accesibles = 
Aparcamiento.objects.filter(accesibilidad=1)\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n value = 0\n for i in lista_accesibles:\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista += ('<li><p>' + nombre_aparcamiento +\n '</p><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n else:\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento +\n '. URL del aparcamiento: ' +\n '<a href=\"aparcamientos/' + url_aparcamiento +\n '\">\\t⇾ Más información</a></br></p>')\n value = 1\n accesible = accesibles(value)\n c = Context({'login': formulario, 'list_users': lista,\n 'accesible': accesible})\n else:\n init = Aparcamiento.objects.all()\n if len(init) == 0:\n get_data()\n c = Context({'login': formulario, 'list': list_megustas,\n 'list_users': users, 'accesible': accesible})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef usuarios(request, peticion):\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Introduzca un título nuevo a su página personal<br><input type=\"text\" name=\"Titulo\">'\n )\n formulario += '<input type=\"submit\" value=\" Actualizar\"></form>'\n css = '<form action=\"\" method=\"POST\">'\n css += 'Modifique el tamaño de letra<br><input type=\"text\" name=\"Letra\">'\n css += (\n '<br><br>Modifique el color de letra\\t<input type=\"color\" name=\"Color\"><br>'\n )\n css += '<br><input type=\"submit\" value=\"Modificar\"></form>'\n aparcamientos = Aparcamiento.objects.all()\n lista = '<br>LISTADO DE APARCAMIENTOS<br><br>'\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n lista += nombre_aparcamiento\n lista += '<form action=\"\" method=\"POST\">'\n lista += 
('<button type=\"submit\" name=\"Seleccionar\" value=\"' +\n nombre_aparcamiento + '\">Seleccionar</button><br></form>')\n user_object = User.objects.get(username=peticion)\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Titulo':\n titulo = request.POST['Titulo']\n try:\n user = Usuario.objects.get(nombre=user_object)\n user.titulo_pagina = titulo\n user.save()\n except ObjectDoesNotExist:\n p = Usuario(nombre=user_object, titulo_pagina=titulo)\n p.save()\n elif key == 'Seleccionar':\n nombre_aparcamiento = request.POST['Seleccionar']\n today = datetime.datetime.today()\n try:\n selector = Usuario.objects.get(nombre=user_object)\n aparcamiento = Aparcamiento.objects.get(nombre=\n nombre_aparcamiento)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n selector = Usuario.objects.get(nombre=user_object)\n Check = False\n lista_usuario = Seleccionados.objects.filter(selector=selector)\n for i in lista_usuario:\n if nombre_aparcamiento == i.aparcamiento.nombre:\n Check = True\n if Check == False:\n p = Seleccionados(aparcamiento=aparcamiento, selector=\n selector, fecha_seleccion=today)\n p.save()\n elif key == 'Letra':\n letra = request.POST['Letra']\n color = request.POST['Color']\n try:\n user = Usuario.objects.get(nombre=user_object)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n user = Usuario.objects.get(nombre=user_object)\n if letra == '':\n letra = '15'\n user.letra = letra\n user.color = color\n user.save()\n lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion,\n request)\n if request.user.is_authenticated():\n username = str(request.user)\n if peticion != username:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n c = Context({'lista_selecc': lista_seleccionados,\n 'seleccionados': 
seleccionados, 'titulo': titulo_pagina,\n 'login': form_user})\n else:\n template = get_template('privateuser.html')\n try:\n titulo_pagina = Usuario.objects.get(nombre=user_object\n ).titulo_pagina\n except ObjectDoesNotExist:\n titulo_pagina = 'Página personal de ' + str(request.user\n ) + '<br><br>'\n c = Context({'lista_selecc': lista_seleccionados,\n 'seleccionados': seleccionados, 'lista': lista, 'form':\n formulario, 'css': css, 'titulo': titulo_pagina})\n else:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Para loguearse vaya al botón de Inicio'\n c = Context({'lista_selecc': lista_seleccionados, 'seleccionados':\n seleccionados, 'titulo': titulo_pagina, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef personalizar(request):\n if request.user.is_authenticated():\n user_object = User.objects.get(username=request.user)\n user = Usuario.objects.get(nombre=user_object)\n letra = user.letra\n color = user.color\n else:\n letra = '14px'\n color = '#FCFCFC'\n css = get_template('change.css')\n c = Context({'letra': letra, 'color': color})\n renderizado = css.render(c)\n return HttpResponse(renderizado, content_type='text/css')\n\n\n<mask token>\n\n\n@csrf_exempt\ndef aparcamientos(request):\n lista = lista_aparcamientos()\n filtrar = '<form action=\"\" method=\"POST\">'\n filtrar += '<br><br><input type=\"text\" name=\"distrito\">'\n filtrar += '<input type=\"submit\" value=\"Filtrar por distrito\">'\n template = get_template('aparcamientos.html')\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n if request.method == 'POST':\n filtro_distrito = request.POST['distrito']\n filtro_distrito = filtro_distrito.upper()\n if filtro_distrito 
== '':\n lista_filtrada = (\n 'No ha introducido ningún filtro, introduzca distrito para filtrar '\n + lista)\n else:\n aparcamientos_filtrados = Aparcamiento.objects.all()\n Encontrado = False\n lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +\n ' son: ')\n for i in aparcamientos_filtrados:\n if filtro_distrito == i.distrito:\n Encontrado = True\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista_filtrada += ('<p>' + nombre_aparcamiento +\n '</p><li><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n if Encontrado == False:\n lista_filtrada = ('Introduzca un nuevo distrito. ' +\n filtro_distrito + ' no es válido')\n c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':\n form_user})\n else:\n c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n@csrf_exempt\ndef aparcamientos_id(request, recurso):\n template = get_template('aparcamientos.html')\n num_megustas = 0\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n print(key)\n if key == 'Me+Gusta':\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1\n aparcamiento.save()\n num_megustas = aparcamiento.contador_megusta\n else:\n coment = request.POST['Comentario']\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_coments = aparcamiento.contador_coments + 1\n aparcamiento.save()\n p = Comentario(aparcamiento=aparcamiento, coment=coment)\n p.save()\n try:\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n nombre = aparcamiento.nombre\n descripcion = aparcamiento.descripcion\n accesibilidad = aparcamiento.accesibilidad\n localizacion = aparcamiento.localizacion\n via = aparcamiento.clase_vial\n num = aparcamiento.num\n localidad = aparcamiento.localidad\n provincia = aparcamiento.provincia\n codigo_postal = 
aparcamiento.codigo_postal\n barrio = aparcamiento.barrio\n distrito = aparcamiento.distrito\n coordenada_x = aparcamiento.coordenada_x\n coordenada_y = aparcamiento.coordenada_y\n telefono = aparcamiento.telefono\n email = aparcamiento.email\n if telefono == '':\n telefono = 'No disponible'\n if email == '':\n email = 'No disponible'\n if accesibilidad == 1:\n acces = 'Libre'\n else:\n acces = 'Ocupado'\n lista_aparcamientos = Aparcamiento.objects.all()\n list_coments = ''\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n num_megustas = aparcamiento.contador_megusta\n for i in lista_aparcamientos:\n if i.entidad == recurso:\n comentarios = Comentario.objects.filter(aparcamiento=i)\n if len(comentarios) != 0:\n list_coments = '<li><p>COMENTARIOS</p><ol>'\n for j in comentarios:\n list_coments += '<li>' + j.coment + '<br>'\n Response = (\n '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +\n recurso + '</br></p>')\n Response += ('<a href=' + i.content_url + '>' + i.nombre +\n '</a><br>')\n Response += 'Descripción: ' + descripcion + '</br>'\n Response += 'Accesibilidad: ' + acces + '</br>'\n Response += ('Localización: ' + via + ' ' + localizacion +\n ', nº ' + str(num))\n Response += ' ' + localidad + ' (' + str(codigo_postal\n ) + ')</br>'\n Response += ('Ubicación: ' + barrio + ' ' + distrito +\n ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(\n coordenada_y) + '<br><br>')\n Response += 'INFORMACIÓN DE CONTACTO </br>'\n Response += 'Teléfono: ' + telefono + '</br>'\n Response += ('Email: ' + email + '</br>' + list_coments +\n '</ol>')\n if num_megustas != 0:\n Response += '</br><li>Numero de me gustas es: ' + str(\n num_megustas) + '<br>'\n else:\n Response += (\n '</br><li>Se el primero en indicar que te gusta la página<br>'\n )\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n formulario = '<form 
action=\"\" method=\"POST\">'\n formulario += (\n '<br>Puede introducir un comentario si lo desea ' + str(\n request.user) + '<br><input type=\"text\" name=\"Comentario\">')\n formulario += '<input type=\"submit\" value=\"Comentar\"></form>'\n Response += formulario\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n megusta = ''\n megusta += '<br> Indica que te gusta este aparcamiento</br>'\n megusta += '<form action=\"\" method=\"POST\">'\n megusta += (\n '<button type=\"submit\" name=\"Me Gusta\" value=\"Me Gusta\"> +1 </button></form>'\n )\n Response += megusta\n except ObjectDoesNotExist:\n Response = 'Este id no se corresponde con ningún aparcamiento'\n c = Context({'lista': Response, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef about(request):\n template = get_template('about.html')\n Cuerpo = 'DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>'\n Cuerpo += (\n '------------------------------------ Página principal ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>'\n )\n Cuerpo += (\n \"<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>\"\n )\n Cuerpo += (\n '<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. 
Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>'\n )\n Cuerpo += (\n '<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página con los aparcamientos ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>\"\n )\n Cuerpo += (\n \"<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>\"\n )\n Cuerpo += (\n '<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz pública de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz privada de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>'\n )\n Cuerpo += (\n \"<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. 
Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>\"\n )\n Cuerpo += (\n '<li> Formulario para cambiar el título de su página personal.</li>')\n Cuerpo += (\n '<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>'\n )\n Cuerpo += (\n \"<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>\"\n )\n Cuerpo += (\n '------------------------------------ Pie de pagina ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página XML de un usuario ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>\"\n )\n c = Context({'lista': Cuerpo})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n",
"step-3": "<mask token>\n\n\n@csrf_exempt\ndef login_form(request):\n formulario = '<form action=\"login\" method=\"POST\">'\n formulario += 'Nombre<br><input type=\"text\" name=\"Usuario\"><br>'\n formulario += 'Contraseña<br><input type=\"password\" name=\"Password\"><br>'\n formulario += '<br><input type=\"submit\" value=\"Entrar\"></form>'\n return formulario\n\n\n@csrf_exempt\ndef loginuser(request):\n username = request.POST['Usuario']\n password = request.POST['Password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n direcc = '/' + str(user)\n return redirect(direcc)\n else:\n Error = 'Por favor, introduzca un usuario y contraseña válidos'\n template = get_template('fail.html')\n c = Context({'Error': Error})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef lista_megustas():\n lista_todos = Aparcamiento.objects.all()\n lista_ordenada = lista_todos.order_by('-contador_megusta')[:5]\n Response = 'LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>'\n Existe = False\n for i in lista_ordenada:\n megustas = i.contador_megusta\n if megustas != 0:\n Response += ('<li><a href=' + i.content_url + '>' + i.nombre +\n '<br></a>')\n Response += ('Dirección: ' + i.clase_vial + ' ' + i.\n localizacion + ', nº ' + str(i.num))\n Response += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.entidad + '>' + 'Más información<br></a><br>')\n Existe = True\n if Existe == False:\n Response += (\n 'Aún no se han registrado comentarios para ningún aparcamiento')\n Response += '</br></br>'\n return Response\n\n\n<mask token>\n\n\ndef lista_aparcamientos():\n lista = ''\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento + '<a href=\"' +\n url_aparcamiento + '\">\\t--> Más información</a></p></li>')\n return lista\n\n\ndef 
aparcamientos_seleccionados(user, request):\n user_object = User.objects.get(username=user)\n try:\n usuario = Usuario.objects.get(nombre=user_object)\n lista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n paginator = Paginator(lista_seleccionados, 5)\n page = request.GET.get('page')\n try:\n seleccionados = paginator.page(page)\n except PageNotAnInteger:\n seleccionados = paginator.page(1)\n except EmptyPage:\n seleccionados = paginator.page(paginator.num_pages)\n lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'\n for i in seleccionados:\n lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)\n lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i\n .aparcamiento.nombre + '<br></a>')\n lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.\n aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))\n lista += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.aparcamiento.entidad + '>' + 'Más información</a><br>')\n except ObjectDoesNotExist:\n lista = 'El usuario aún no ha seleccionado ningún aparcamiento'\n seleccionados = ''\n return lista, seleccionados\n\n\ndef accesibles(value):\n accesibles = '<form action=\"\" method=\"POST\">'\n accesibles += '<button type=\"submit\" name=\"Accesible\" value=\"' + str(value\n ) + '\"> Accesibles</button></form>'\n return accesibles\n\n\n@csrf_exempt\ndef pagina_principal(request):\n formulario = login_form(request)\n list_megustas = lista_megustas()\n users = paginas_personales()\n value = 1\n accesible = accesibles(value)\n template = get_template('index.html')\n if request.user.is_authenticated():\n username = str(request.user)\n formulario = 'Bienvenido ' + username\n formulario += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Accesible':\n value = request.POST['Accesible']\n if value == '1':\n lista_accesibles = 
Aparcamiento.objects.filter(accesibilidad=1)\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n value = 0\n for i in lista_accesibles:\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista += ('<li><p>' + nombre_aparcamiento +\n '</p><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n else:\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento +\n '. URL del aparcamiento: ' +\n '<a href=\"aparcamientos/' + url_aparcamiento +\n '\">\\t⇾ Más información</a></br></p>')\n value = 1\n accesible = accesibles(value)\n c = Context({'login': formulario, 'list_users': lista,\n 'accesible': accesible})\n else:\n init = Aparcamiento.objects.all()\n if len(init) == 0:\n get_data()\n c = Context({'login': formulario, 'list': list_megustas,\n 'list_users': users, 'accesible': accesible})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef usuarios(request, peticion):\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Introduzca un título nuevo a su página personal<br><input type=\"text\" name=\"Titulo\">'\n )\n formulario += '<input type=\"submit\" value=\" Actualizar\"></form>'\n css = '<form action=\"\" method=\"POST\">'\n css += 'Modifique el tamaño de letra<br><input type=\"text\" name=\"Letra\">'\n css += (\n '<br><br>Modifique el color de letra\\t<input type=\"color\" name=\"Color\"><br>'\n )\n css += '<br><input type=\"submit\" value=\"Modificar\"></form>'\n aparcamientos = Aparcamiento.objects.all()\n lista = '<br>LISTADO DE APARCAMIENTOS<br><br>'\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n lista += nombre_aparcamiento\n lista += '<form action=\"\" method=\"POST\">'\n lista += 
('<button type=\"submit\" name=\"Seleccionar\" value=\"' +\n nombre_aparcamiento + '\">Seleccionar</button><br></form>')\n user_object = User.objects.get(username=peticion)\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Titulo':\n titulo = request.POST['Titulo']\n try:\n user = Usuario.objects.get(nombre=user_object)\n user.titulo_pagina = titulo\n user.save()\n except ObjectDoesNotExist:\n p = Usuario(nombre=user_object, titulo_pagina=titulo)\n p.save()\n elif key == 'Seleccionar':\n nombre_aparcamiento = request.POST['Seleccionar']\n today = datetime.datetime.today()\n try:\n selector = Usuario.objects.get(nombre=user_object)\n aparcamiento = Aparcamiento.objects.get(nombre=\n nombre_aparcamiento)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n selector = Usuario.objects.get(nombre=user_object)\n Check = False\n lista_usuario = Seleccionados.objects.filter(selector=selector)\n for i in lista_usuario:\n if nombre_aparcamiento == i.aparcamiento.nombre:\n Check = True\n if Check == False:\n p = Seleccionados(aparcamiento=aparcamiento, selector=\n selector, fecha_seleccion=today)\n p.save()\n elif key == 'Letra':\n letra = request.POST['Letra']\n color = request.POST['Color']\n try:\n user = Usuario.objects.get(nombre=user_object)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n user = Usuario.objects.get(nombre=user_object)\n if letra == '':\n letra = '15'\n user.letra = letra\n user.color = color\n user.save()\n lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion,\n request)\n if request.user.is_authenticated():\n username = str(request.user)\n if peticion != username:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n c = Context({'lista_selecc': lista_seleccionados,\n 'seleccionados': 
seleccionados, 'titulo': titulo_pagina,\n 'login': form_user})\n else:\n template = get_template('privateuser.html')\n try:\n titulo_pagina = Usuario.objects.get(nombre=user_object\n ).titulo_pagina\n except ObjectDoesNotExist:\n titulo_pagina = 'Página personal de ' + str(request.user\n ) + '<br><br>'\n c = Context({'lista_selecc': lista_seleccionados,\n 'seleccionados': seleccionados, 'lista': lista, 'form':\n formulario, 'css': css, 'titulo': titulo_pagina})\n else:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Para loguearse vaya al botón de Inicio'\n c = Context({'lista_selecc': lista_seleccionados, 'seleccionados':\n seleccionados, 'titulo': titulo_pagina, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef personalizar(request):\n if request.user.is_authenticated():\n user_object = User.objects.get(username=request.user)\n user = Usuario.objects.get(nombre=user_object)\n letra = user.letra\n color = user.color\n else:\n letra = '14px'\n color = '#FCFCFC'\n css = get_template('change.css')\n c = Context({'letra': letra, 'color': color})\n renderizado = css.render(c)\n return HttpResponse(renderizado, content_type='text/css')\n\n\ndef usuarios_xml(request, peticion):\n user_object = User.objects.get(username=peticion)\n doc = Document()\n cont = doc.createElement('Contenidos')\n doc.appendChild(cont)\n info = doc.createElement('infoDataset')\n cont.appendChild(info)\n nombre = doc.createElement('Nombre')\n info.appendChild(nombre)\n ptext = doc.createTextNode(\n 'XML de aparcamientos seleccionados por el usuario ' + peticion)\n nombre.appendChild(ptext)\n url = doc.createElement('url')\n info.appendChild(url)\n ptext = doc.createTextNode('http://localhost:1234/' + peticion + '/xml/')\n url.appendChild(ptext)\n aparc = doc.createElement('Aparcamientos')\n cont.appendChild(aparc)\n try:\n usuario = Usuario.objects.get(nombre=user_object)\n 
lista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n for i in lista_seleccionados:\n item = doc.createElement('Contenido')\n aparc.appendChild(item)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'ID-ENTIDAD')\n ptext = doc.createTextNode(i.aparcamiento.entidad)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'NOMBRE')\n ptext = doc.createTextNode(i.aparcamiento.nombre)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'DESCRIPCION')\n ptext = doc.createTextNode(i.aparcamiento.descripcion)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'ACCESIBILIDAD')\n if i.aparcamiento.accesibilidad == True:\n acces = 1\n else:\n acces = 0\n ptext = doc.createTextNode(str(acces))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CONTENT_URL')\n ptext = doc.createTextNode(i.aparcamiento.content_url)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'LOCALIZACION')\n ptext = doc.createTextNode(i.aparcamiento.localizacion)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CLASE VIAL')\n ptext = doc.createTextNode(i.aparcamiento.clase_vial)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'TIPO NUM')\n ptext = doc.createTextNode(i.aparcamiento.tipo_num)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'NUM')\n ptext = 
doc.createTextNode(str(i.aparcamiento.num))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'LOCALIDAD')\n ptext = doc.createTextNode(i.aparcamiento.localidad)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'PROVINCIA')\n ptext = doc.createTextNode(i.aparcamiento.provincia)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CODIGO POSTAL')\n ptext = doc.createTextNode(str(i.aparcamiento.codigo_postal))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'BARRIO')\n ptext = doc.createTextNode(i.aparcamiento.barrio)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'DISTRITO')\n ptext = doc.createTextNode(i.aparcamiento.distrito)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'COORDENADA X')\n ptext = doc.createTextNode(str(i.aparcamiento.coordenada_x))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'COORDENADA Y')\n ptext = doc.createTextNode(str(i.aparcamiento.coordenada_y))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n datos = doc.createElement('DATOSDECONTACTO')\n item.appendChild(datos)\n atributo = doc.createElement('atributo')\n datos.appendChild(atributo)\n atributo.setAttribute('nombre', 'TELEFONO')\n ptext = doc.createTextNode(i.aparcamiento.telefono)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n datos.appendChild(atributo)\n atributo.setAttribute('nombre', 'EMAIL')\n 
ptext = doc.createTextNode(i.aparcamiento.email)\n atributo.appendChild(ptext)\n except:\n print('')\n xml = doc.toprettyxml(indent=' ')\n return HttpResponse(xml, content_type='text/xml')\n\n\n@csrf_exempt\ndef aparcamientos(request):\n lista = lista_aparcamientos()\n filtrar = '<form action=\"\" method=\"POST\">'\n filtrar += '<br><br><input type=\"text\" name=\"distrito\">'\n filtrar += '<input type=\"submit\" value=\"Filtrar por distrito\">'\n template = get_template('aparcamientos.html')\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n if request.method == 'POST':\n filtro_distrito = request.POST['distrito']\n filtro_distrito = filtro_distrito.upper()\n if filtro_distrito == '':\n lista_filtrada = (\n 'No ha introducido ningún filtro, introduzca distrito para filtrar '\n + lista)\n else:\n aparcamientos_filtrados = Aparcamiento.objects.all()\n Encontrado = False\n lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +\n ' son: ')\n for i in aparcamientos_filtrados:\n if filtro_distrito == i.distrito:\n Encontrado = True\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista_filtrada += ('<p>' + nombre_aparcamiento +\n '</p><li><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n if Encontrado == False:\n lista_filtrada = ('Introduzca un nuevo distrito. 
' +\n filtro_distrito + ' no es válido')\n c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':\n form_user})\n else:\n c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n@csrf_exempt\ndef aparcamientos_id(request, recurso):\n template = get_template('aparcamientos.html')\n num_megustas = 0\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n print(key)\n if key == 'Me+Gusta':\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1\n aparcamiento.save()\n num_megustas = aparcamiento.contador_megusta\n else:\n coment = request.POST['Comentario']\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_coments = aparcamiento.contador_coments + 1\n aparcamiento.save()\n p = Comentario(aparcamiento=aparcamiento, coment=coment)\n p.save()\n try:\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n nombre = aparcamiento.nombre\n descripcion = aparcamiento.descripcion\n accesibilidad = aparcamiento.accesibilidad\n localizacion = aparcamiento.localizacion\n via = aparcamiento.clase_vial\n num = aparcamiento.num\n localidad = aparcamiento.localidad\n provincia = aparcamiento.provincia\n codigo_postal = aparcamiento.codigo_postal\n barrio = aparcamiento.barrio\n distrito = aparcamiento.distrito\n coordenada_x = aparcamiento.coordenada_x\n coordenada_y = aparcamiento.coordenada_y\n telefono = aparcamiento.telefono\n email = aparcamiento.email\n if telefono == '':\n telefono = 'No disponible'\n if email == '':\n email = 'No disponible'\n if accesibilidad == 1:\n acces = 'Libre'\n else:\n acces = 'Ocupado'\n lista_aparcamientos = Aparcamiento.objects.all()\n list_coments = ''\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n num_megustas = aparcamiento.contador_megusta\n for i in lista_aparcamientos:\n if i.entidad == 
recurso:\n comentarios = Comentario.objects.filter(aparcamiento=i)\n if len(comentarios) != 0:\n list_coments = '<li><p>COMENTARIOS</p><ol>'\n for j in comentarios:\n list_coments += '<li>' + j.coment + '<br>'\n Response = (\n '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +\n recurso + '</br></p>')\n Response += ('<a href=' + i.content_url + '>' + i.nombre +\n '</a><br>')\n Response += 'Descripción: ' + descripcion + '</br>'\n Response += 'Accesibilidad: ' + acces + '</br>'\n Response += ('Localización: ' + via + ' ' + localizacion +\n ', nº ' + str(num))\n Response += ' ' + localidad + ' (' + str(codigo_postal\n ) + ')</br>'\n Response += ('Ubicación: ' + barrio + ' ' + distrito +\n ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(\n coordenada_y) + '<br><br>')\n Response += 'INFORMACIÓN DE CONTACTO </br>'\n Response += 'Teléfono: ' + telefono + '</br>'\n Response += ('Email: ' + email + '</br>' + list_coments +\n '</ol>')\n if num_megustas != 0:\n Response += '</br><li>Numero de me gustas es: ' + str(\n num_megustas) + '<br>'\n else:\n Response += (\n '</br><li>Se el primero en indicar que te gusta la página<br>'\n )\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Puede introducir un comentario si lo desea ' + str(\n request.user) + '<br><input type=\"text\" name=\"Comentario\">')\n formulario += '<input type=\"submit\" value=\"Comentar\"></form>'\n Response += formulario\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n megusta = ''\n megusta += '<br> Indica que te gusta este aparcamiento</br>'\n megusta += '<form action=\"\" method=\"POST\">'\n megusta += (\n '<button type=\"submit\" name=\"Me Gusta\" value=\"Me Gusta\"> +1 </button></form>'\n )\n Response += megusta\n except ObjectDoesNotExist:\n Response = 'Este id no se 
corresponde con ningún aparcamiento'\n c = Context({'lista': Response, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef about(request):\n template = get_template('about.html')\n Cuerpo = 'DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>'\n Cuerpo += (\n '------------------------------------ Página principal ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>'\n )\n Cuerpo += (\n \"<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>\"\n )\n Cuerpo += (\n '<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>'\n )\n Cuerpo += (\n '<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página con los aparcamientos ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>\"\n )\n Cuerpo += (\n \"<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. 
Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>\"\n )\n Cuerpo += (\n '<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz pública de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz privada de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>'\n )\n Cuerpo += (\n \"<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. 
</li>\"\n )\n Cuerpo += (\n '<li> Formulario para cambiar el título de su página personal.</li>')\n Cuerpo += (\n '<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>'\n )\n Cuerpo += (\n \"<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>\"\n )\n Cuerpo += (\n '------------------------------------ Pie de pagina ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página XML de un usuario ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>\"\n )\n c = Context({'lista': Cuerpo})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n",
"step-4": "<mask token>\n\n\n@csrf_exempt\ndef login_form(request):\n formulario = '<form action=\"login\" method=\"POST\">'\n formulario += 'Nombre<br><input type=\"text\" name=\"Usuario\"><br>'\n formulario += 'Contraseña<br><input type=\"password\" name=\"Password\"><br>'\n formulario += '<br><input type=\"submit\" value=\"Entrar\"></form>'\n return formulario\n\n\n@csrf_exempt\ndef loginuser(request):\n username = request.POST['Usuario']\n password = request.POST['Password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n direcc = '/' + str(user)\n return redirect(direcc)\n else:\n Error = 'Por favor, introduzca un usuario y contraseña válidos'\n template = get_template('fail.html')\n c = Context({'Error': Error})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef lista_megustas():\n lista_todos = Aparcamiento.objects.all()\n lista_ordenada = lista_todos.order_by('-contador_megusta')[:5]\n Response = 'LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>'\n Existe = False\n for i in lista_ordenada:\n megustas = i.contador_megusta\n if megustas != 0:\n Response += ('<li><a href=' + i.content_url + '>' + i.nombre +\n '<br></a>')\n Response += ('Dirección: ' + i.clase_vial + ' ' + i.\n localizacion + ', nº ' + str(i.num))\n Response += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.entidad + '>' + 'Más información<br></a><br>')\n Existe = True\n if Existe == False:\n Response += (\n 'Aún no se han registrado comentarios para ningún aparcamiento')\n Response += '</br></br>'\n return Response\n\n\n<mask token>\n\n\ndef lista_aparcamientos():\n lista = ''\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento + '<a href=\"' +\n url_aparcamiento + '\">\\t--> Más información</a></p></li>')\n return lista\n\n\ndef 
aparcamientos_seleccionados(user, request):\n user_object = User.objects.get(username=user)\n try:\n usuario = Usuario.objects.get(nombre=user_object)\n lista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n paginator = Paginator(lista_seleccionados, 5)\n page = request.GET.get('page')\n try:\n seleccionados = paginator.page(page)\n except PageNotAnInteger:\n seleccionados = paginator.page(1)\n except EmptyPage:\n seleccionados = paginator.page(paginator.num_pages)\n lista = 'Listado de aparcamientos seleccionados por ' + user + '<br>'\n for i in seleccionados:\n lista += '<br><li>Fecha de selección: ' + str(i.fecha_seleccion)\n lista += ('<br><a href=' + i.aparcamiento.content_url + '>' + i\n .aparcamiento.nombre + '<br></a>')\n lista += ('Dirección: ' + i.aparcamiento.clase_vial + ' ' + i.\n aparcamiento.localizacion + ', nº ' + str(i.aparcamiento.num))\n lista += ('<br><a href=http://localhost:1234/aparcamientos/' +\n i.aparcamiento.entidad + '>' + 'Más información</a><br>')\n except ObjectDoesNotExist:\n lista = 'El usuario aún no ha seleccionado ningún aparcamiento'\n seleccionados = ''\n return lista, seleccionados\n\n\ndef accesibles(value):\n accesibles = '<form action=\"\" method=\"POST\">'\n accesibles += '<button type=\"submit\" name=\"Accesible\" value=\"' + str(value\n ) + '\"> Accesibles</button></form>'\n return accesibles\n\n\n@csrf_exempt\ndef pagina_principal(request):\n formulario = login_form(request)\n list_megustas = lista_megustas()\n users = paginas_personales()\n value = 1\n accesible = accesibles(value)\n template = get_template('index.html')\n if request.user.is_authenticated():\n username = str(request.user)\n formulario = 'Bienvenido ' + username\n formulario += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Accesible':\n value = request.POST['Accesible']\n if value == '1':\n lista_accesibles = 
Aparcamiento.objects.filter(accesibilidad=1)\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n value = 0\n for i in lista_accesibles:\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista += ('<li><p>' + nombre_aparcamiento +\n '</p><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n else:\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento +\n '. URL del aparcamiento: ' +\n '<a href=\"aparcamientos/' + url_aparcamiento +\n '\">\\t⇾ Más información</a></br></p>')\n value = 1\n accesible = accesibles(value)\n c = Context({'login': formulario, 'list_users': lista,\n 'accesible': accesible})\n else:\n init = Aparcamiento.objects.all()\n if len(init) == 0:\n get_data()\n c = Context({'login': formulario, 'list': list_megustas,\n 'list_users': users, 'accesible': accesible})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef mylogout(request):\n logout(request)\n return redirect('/')\n\n\n@csrf_exempt\ndef usuarios(request, peticion):\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Introduzca un título nuevo a su página personal<br><input type=\"text\" name=\"Titulo\">'\n )\n formulario += '<input type=\"submit\" value=\" Actualizar\"></form>'\n css = '<form action=\"\" method=\"POST\">'\n css += 'Modifique el tamaño de letra<br><input type=\"text\" name=\"Letra\">'\n css += (\n '<br><br>Modifique el color de letra\\t<input type=\"color\" name=\"Color\"><br>'\n )\n css += '<br><input type=\"submit\" value=\"Modificar\"></form>'\n aparcamientos = Aparcamiento.objects.all()\n lista = '<br>LISTADO DE APARCAMIENTOS<br><br>'\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n lista += nombre_aparcamiento\n lista += 
'<form action=\"\" method=\"POST\">'\n lista += ('<button type=\"submit\" name=\"Seleccionar\" value=\"' +\n nombre_aparcamiento + '\">Seleccionar</button><br></form>')\n user_object = User.objects.get(username=peticion)\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Titulo':\n titulo = request.POST['Titulo']\n try:\n user = Usuario.objects.get(nombre=user_object)\n user.titulo_pagina = titulo\n user.save()\n except ObjectDoesNotExist:\n p = Usuario(nombre=user_object, titulo_pagina=titulo)\n p.save()\n elif key == 'Seleccionar':\n nombre_aparcamiento = request.POST['Seleccionar']\n today = datetime.datetime.today()\n try:\n selector = Usuario.objects.get(nombre=user_object)\n aparcamiento = Aparcamiento.objects.get(nombre=\n nombre_aparcamiento)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n selector = Usuario.objects.get(nombre=user_object)\n Check = False\n lista_usuario = Seleccionados.objects.filter(selector=selector)\n for i in lista_usuario:\n if nombre_aparcamiento == i.aparcamiento.nombre:\n Check = True\n if Check == False:\n p = Seleccionados(aparcamiento=aparcamiento, selector=\n selector, fecha_seleccion=today)\n p.save()\n elif key == 'Letra':\n letra = request.POST['Letra']\n color = request.POST['Color']\n try:\n user = Usuario.objects.get(nombre=user_object)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n user = Usuario.objects.get(nombre=user_object)\n if letra == '':\n letra = '15'\n user.letra = letra\n user.color = color\n user.save()\n lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion,\n request)\n if request.user.is_authenticated():\n username = str(request.user)\n if peticion != username:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n c = Context({'lista_selecc': 
lista_seleccionados,\n 'seleccionados': seleccionados, 'titulo': titulo_pagina,\n 'login': form_user})\n else:\n template = get_template('privateuser.html')\n try:\n titulo_pagina = Usuario.objects.get(nombre=user_object\n ).titulo_pagina\n except ObjectDoesNotExist:\n titulo_pagina = 'Página personal de ' + str(request.user\n ) + '<br><br>'\n c = Context({'lista_selecc': lista_seleccionados,\n 'seleccionados': seleccionados, 'lista': lista, 'form':\n formulario, 'css': css, 'titulo': titulo_pagina})\n else:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Para loguearse vaya al botón de Inicio'\n c = Context({'lista_selecc': lista_seleccionados, 'seleccionados':\n seleccionados, 'titulo': titulo_pagina, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef personalizar(request):\n if request.user.is_authenticated():\n user_object = User.objects.get(username=request.user)\n user = Usuario.objects.get(nombre=user_object)\n letra = user.letra\n color = user.color\n else:\n letra = '14px'\n color = '#FCFCFC'\n css = get_template('change.css')\n c = Context({'letra': letra, 'color': color})\n renderizado = css.render(c)\n return HttpResponse(renderizado, content_type='text/css')\n\n\ndef usuarios_xml(request, peticion):\n user_object = User.objects.get(username=peticion)\n doc = Document()\n cont = doc.createElement('Contenidos')\n doc.appendChild(cont)\n info = doc.createElement('infoDataset')\n cont.appendChild(info)\n nombre = doc.createElement('Nombre')\n info.appendChild(nombre)\n ptext = doc.createTextNode(\n 'XML de aparcamientos seleccionados por el usuario ' + peticion)\n nombre.appendChild(ptext)\n url = doc.createElement('url')\n info.appendChild(url)\n ptext = doc.createTextNode('http://localhost:1234/' + peticion + '/xml/')\n url.appendChild(ptext)\n aparc = doc.createElement('Aparcamientos')\n cont.appendChild(aparc)\n try:\n usuario = 
Usuario.objects.get(nombre=user_object)\n lista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n for i in lista_seleccionados:\n item = doc.createElement('Contenido')\n aparc.appendChild(item)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'ID-ENTIDAD')\n ptext = doc.createTextNode(i.aparcamiento.entidad)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'NOMBRE')\n ptext = doc.createTextNode(i.aparcamiento.nombre)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'DESCRIPCION')\n ptext = doc.createTextNode(i.aparcamiento.descripcion)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'ACCESIBILIDAD')\n if i.aparcamiento.accesibilidad == True:\n acces = 1\n else:\n acces = 0\n ptext = doc.createTextNode(str(acces))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CONTENT_URL')\n ptext = doc.createTextNode(i.aparcamiento.content_url)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'LOCALIZACION')\n ptext = doc.createTextNode(i.aparcamiento.localizacion)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CLASE VIAL')\n ptext = doc.createTextNode(i.aparcamiento.clase_vial)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'TIPO NUM')\n ptext = doc.createTextNode(i.aparcamiento.tipo_num)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n 
atributo.setAttribute('nombre', 'NUM')\n ptext = doc.createTextNode(str(i.aparcamiento.num))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'LOCALIDAD')\n ptext = doc.createTextNode(i.aparcamiento.localidad)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'PROVINCIA')\n ptext = doc.createTextNode(i.aparcamiento.provincia)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CODIGO POSTAL')\n ptext = doc.createTextNode(str(i.aparcamiento.codigo_postal))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'BARRIO')\n ptext = doc.createTextNode(i.aparcamiento.barrio)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'DISTRITO')\n ptext = doc.createTextNode(i.aparcamiento.distrito)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'COORDENADA X')\n ptext = doc.createTextNode(str(i.aparcamiento.coordenada_x))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'COORDENADA Y')\n ptext = doc.createTextNode(str(i.aparcamiento.coordenada_y))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n datos = doc.createElement('DATOSDECONTACTO')\n item.appendChild(datos)\n atributo = doc.createElement('atributo')\n datos.appendChild(atributo)\n atributo.setAttribute('nombre', 'TELEFONO')\n ptext = doc.createTextNode(i.aparcamiento.telefono)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n 
datos.appendChild(atributo)\n atributo.setAttribute('nombre', 'EMAIL')\n ptext = doc.createTextNode(i.aparcamiento.email)\n atributo.appendChild(ptext)\n except:\n print('')\n xml = doc.toprettyxml(indent=' ')\n return HttpResponse(xml, content_type='text/xml')\n\n\n@csrf_exempt\ndef aparcamientos(request):\n lista = lista_aparcamientos()\n filtrar = '<form action=\"\" method=\"POST\">'\n filtrar += '<br><br><input type=\"text\" name=\"distrito\">'\n filtrar += '<input type=\"submit\" value=\"Filtrar por distrito\">'\n template = get_template('aparcamientos.html')\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n if request.method == 'POST':\n filtro_distrito = request.POST['distrito']\n filtro_distrito = filtro_distrito.upper()\n if filtro_distrito == '':\n lista_filtrada = (\n 'No ha introducido ningún filtro, introduzca distrito para filtrar '\n + lista)\n else:\n aparcamientos_filtrados = Aparcamiento.objects.all()\n Encontrado = False\n lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +\n ' son: ')\n for i in aparcamientos_filtrados:\n if filtro_distrito == i.distrito:\n Encontrado = True\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista_filtrada += ('<p>' + nombre_aparcamiento +\n '</p><li><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n if Encontrado == False:\n lista_filtrada = ('Introduzca un nuevo distrito. 
' +\n filtro_distrito + ' no es válido')\n c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':\n form_user})\n else:\n c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n@csrf_exempt\ndef aparcamientos_id(request, recurso):\n template = get_template('aparcamientos.html')\n num_megustas = 0\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n print(key)\n if key == 'Me+Gusta':\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1\n aparcamiento.save()\n num_megustas = aparcamiento.contador_megusta\n else:\n coment = request.POST['Comentario']\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_coments = aparcamiento.contador_coments + 1\n aparcamiento.save()\n p = Comentario(aparcamiento=aparcamiento, coment=coment)\n p.save()\n try:\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n nombre = aparcamiento.nombre\n descripcion = aparcamiento.descripcion\n accesibilidad = aparcamiento.accesibilidad\n localizacion = aparcamiento.localizacion\n via = aparcamiento.clase_vial\n num = aparcamiento.num\n localidad = aparcamiento.localidad\n provincia = aparcamiento.provincia\n codigo_postal = aparcamiento.codigo_postal\n barrio = aparcamiento.barrio\n distrito = aparcamiento.distrito\n coordenada_x = aparcamiento.coordenada_x\n coordenada_y = aparcamiento.coordenada_y\n telefono = aparcamiento.telefono\n email = aparcamiento.email\n if telefono == '':\n telefono = 'No disponible'\n if email == '':\n email = 'No disponible'\n if accesibilidad == 1:\n acces = 'Libre'\n else:\n acces = 'Ocupado'\n lista_aparcamientos = Aparcamiento.objects.all()\n list_coments = ''\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n num_megustas = aparcamiento.contador_megusta\n for i in lista_aparcamientos:\n if i.entidad == 
recurso:\n comentarios = Comentario.objects.filter(aparcamiento=i)\n if len(comentarios) != 0:\n list_coments = '<li><p>COMENTARIOS</p><ol>'\n for j in comentarios:\n list_coments += '<li>' + j.coment + '<br>'\n Response = (\n '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +\n recurso + '</br></p>')\n Response += ('<a href=' + i.content_url + '>' + i.nombre +\n '</a><br>')\n Response += 'Descripción: ' + descripcion + '</br>'\n Response += 'Accesibilidad: ' + acces + '</br>'\n Response += ('Localización: ' + via + ' ' + localizacion +\n ', nº ' + str(num))\n Response += ' ' + localidad + ' (' + str(codigo_postal\n ) + ')</br>'\n Response += ('Ubicación: ' + barrio + ' ' + distrito +\n ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(\n coordenada_y) + '<br><br>')\n Response += 'INFORMACIÓN DE CONTACTO </br>'\n Response += 'Teléfono: ' + telefono + '</br>'\n Response += ('Email: ' + email + '</br>' + list_coments +\n '</ol>')\n if num_megustas != 0:\n Response += '</br><li>Numero de me gustas es: ' + str(\n num_megustas) + '<br>'\n else:\n Response += (\n '</br><li>Se el primero en indicar que te gusta la página<br>'\n )\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Puede introducir un comentario si lo desea ' + str(\n request.user) + '<br><input type=\"text\" name=\"Comentario\">')\n formulario += '<input type=\"submit\" value=\"Comentar\"></form>'\n Response += formulario\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n megusta = ''\n megusta += '<br> Indica que te gusta este aparcamiento</br>'\n megusta += '<form action=\"\" method=\"POST\">'\n megusta += (\n '<button type=\"submit\" name=\"Me Gusta\" value=\"Me Gusta\"> +1 </button></form>'\n )\n Response += megusta\n except ObjectDoesNotExist:\n Response = 'Este id no se 
corresponde con ningún aparcamiento'\n c = Context({'lista': Response, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef about(request):\n template = get_template('about.html')\n Cuerpo = 'DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>'\n Cuerpo += (\n '------------------------------------ Página principal ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>'\n )\n Cuerpo += (\n \"<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>\"\n )\n Cuerpo += (\n '<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>'\n )\n Cuerpo += (\n '<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página con los aparcamientos ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>\"\n )\n Cuerpo += (\n \"<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. 
Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>\"\n )\n Cuerpo += (\n '<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz pública de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz privada de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>'\n )\n Cuerpo += (\n \"<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. 
</li>\"\n )\n Cuerpo += (\n '<li> Formulario para cambiar el título de su página personal.</li>')\n Cuerpo += (\n '<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>'\n )\n Cuerpo += (\n \"<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>\"\n )\n Cuerpo += (\n '------------------------------------ Pie de pagina ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página XML de un usuario ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>\"\n )\n c = Context({'lista': Cuerpo})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n",
"step-5": "from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponse\nfrom .models import *\nfrom django.contrib.auth import logout, authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template.context_processors import csrf\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nimport xml.etree.ElementTree as etree\nfrom xml.dom.minidom import Document, parse\nimport xml.dom.minidom as dom\n\nimport datetime\nimport sys\nfrom .parser import get_data\nfrom django.http import QueryDict\nimport urllib\n\n\n# Create your views here.\n@csrf_exempt\ndef login_form(request):\n\n formulario = '<form action=\"login\" method=\"POST\">'\n formulario += 'Nombre<br><input type=\"text\" name=\"Usuario\"><br>'\n formulario += 'Contraseña<br><input type=\"password\" name=\"Password\"><br>'\n formulario += '<br><input type=\"submit\" value=\"Entrar\"></form>'\n return formulario\n\n@csrf_exempt\ndef loginuser(request):\n\n\tusername = request.POST['Usuario']\n\tpassword = request.POST['Password']\n\tuser = authenticate(username=username, password=password)\n\tif user is not None:\n\t\tlogin(request,user)\n\t\tdirecc = '/' + str(user)\n\t\treturn redirect(direcc)\n\telse:\n\t\tError = \"Por favor, introduzca un usuario y contraseña válidos\"\n\t\ttemplate = get_template(\"fail.html\")\n\t\tc = Context ({'Error': Error})\n\t\trenderizado = template.render(c)\n\t\treturn HttpResponse(renderizado)\n\ndef lista_megustas():\n\n lista_todos = Aparcamiento.objects.all()\n lista_ordenada = lista_todos.order_by(\"-contador_megusta\")[:5]\n Response = \"LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>\"\n Existe = False\n for i in lista_ordenada:\n megustas = i.contador_megusta\n 
#comentarios = Comentario.objects.filter(aparcamiento=i)\n if megustas != 0:\n Response += \"<li><a href=\" + i.content_url + \">\" + i.nombre + \"<br></a>\"\n Response += \"Dirección: \" + i.clase_vial + \" \" + i.localizacion + \", nº \" + str(i.num)\n Response += \"<br><a href=http://localhost:1234/aparcamientos/\" + i.entidad + \">\" + \"Más información<br></a><br>\"\n Existe = True\n if Existe == False:\n Response += \"Aún no se han registrado comentarios para ningún aparcamiento\"\n\n Response += \"</br></br>\"\n return Response\n\ndef paginas_personales():\n\n\tLista = \"PÁGINAS DE USUARIOS<br><br>\"\n\tusuarios = User.objects.all()\n\tfor i in usuarios:\n\t\ttry:\n\t\t\tpagina = Usuario.objects.get(nombre=i.id).titulo_pagina\n\t\texcept ObjectDoesNotExist:\n\t\t\tpagina = \"Página de \" + i.username\n\t\tLista += \"<a href=http://localhost:1234/\" + i.username + \">\" + pagina + \"</a>\tUsuario: \" + i.username + \"<br>\"\n\n\treturn Lista\n\ndef lista_aparcamientos():\n\n\tlista = ''\n\taparcamientos = Aparcamiento.objects.all()\n\tfor aparcamiento in aparcamientos:\n\t\tnombre_aparcamiento = aparcamiento.nombre\n\t\turl_aparcamiento = aparcamiento.entidad\n\t\tlista += '<li><p>' + nombre_aparcamiento + '<a href=\"' + url_aparcamiento + '\">\t--> Más información</a></p></li>'\n\n\treturn lista\n\ndef aparcamientos_seleccionados(user,request):\n\n\tuser_object = User.objects.get(username=user)\n\n\ttry:\n\t\tusuario = Usuario.objects.get(nombre=user_object)\n\t\tlista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n\n\t\tpaginator = Paginator(lista_seleccionados,5)\n\t\tpage = request.GET.get('page')\n\t\ttry:\n\t\t\tseleccionados = paginator.page(page)\n\t\texcept PageNotAnInteger:\n\t\t\t# If page is not an integer, deliver first page.\n\t\t\tseleccionados = paginator.page(1)\n\t\texcept EmptyPage:\n\t\t # If page is out of range (e.g. 
9999), deliver last page of results.\n\t\t\tseleccionados = paginator.page(paginator.num_pages)\n\n\t\tlista = \"Listado de aparcamientos seleccionados por \" + user + \"<br>\"\n\n\t\tfor i in seleccionados:\n\t\t\tlista += \"<br><li>Fecha de selección: \" + str(i.fecha_seleccion)\n\t\t\tlista += \"<br><a href=\" + i.aparcamiento.content_url + \">\" + i.aparcamiento.nombre + \"<br></a>\"\n\t\t\tlista += \"Dirección: \" + i.aparcamiento.clase_vial + \" \" + i.aparcamiento.localizacion + \", nº \" + str(i.aparcamiento.num)\n\t\t\tlista += \"<br><a href=http://localhost:1234/aparcamientos/\" + i.aparcamiento.entidad + \">\" + \"Más información</a><br>\"\n\texcept ObjectDoesNotExist:\n\t\tlista = \"El usuario aún no ha seleccionado ningún aparcamiento\"\n\t\tseleccionados = \"\"\n\n\n\treturn lista,seleccionados\n\ndef accesibles(value):\n\taccesibles = '<form action=\"\" method=\"POST\">'\n\taccesibles += '<button type=\"submit\" name=\"Accesible\" value=\"' + str(value) + '\"> Accesibles</button></form>'\n\n\treturn accesibles\n\n@csrf_exempt\ndef pagina_principal(request):\n\n\tformulario = login_form(request)\n\tlist_megustas = lista_megustas()\n\tusers = paginas_personales()\n\n\tvalue = 1\n\taccesible = accesibles(value)\n\n\ttemplate = get_template(\"index.html\")\n\n\tif request.user.is_authenticated():\n\t\tusername = str(request.user)\n\t\tformulario = 'Bienvenido ' + username\n\t\tformulario += '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>'\n\n\tif request.method == 'POST':\n\n\t\tkey = request.body.decode(\"utf-8\").split('=')[0]\n\n\t\tif key == 'Accesible':\n\t\t\tvalue = request.POST['Accesible']\n\n\t\t\tif value == '1':\n\t\t\t\tlista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)\n\t\t\t\tlista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n\t\t\t\tvalue = 0\n\t\t\t\tfor i in lista_accesibles:\n\t\t\t\t\tnombre_aparcamiento = i.nombre\n\t\t\t\t\turl_aparcamiento = i.content_url\n\t\t\t\t\tlista += \"<li><p>\" + 
nombre_aparcamiento + \"</p><a href=\" + url_aparcamiento + \">\" + url_aparcamiento + \"</a></li>\"\n\t\t\telse:\n\t\t\t\tlista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n\t\t\t\taparcamientos = Aparcamiento.objects.all()\n\t\t\t\tfor aparcamiento in aparcamientos:\n\t\t\t\t\tnombre_aparcamiento = aparcamiento.nombre\n\t\t\t\t\turl_aparcamiento = aparcamiento.entidad\n\t\t\t\t\tlista += '<li><p>' + nombre_aparcamiento + '. URL del aparcamiento: ' + '<a href=\"aparcamientos/' + url_aparcamiento + '\">\t⇾ Más información</a></br></p>'\n\t\t\t\tvalue = 1\n\n\t\t\taccesible = accesibles(value)\n\t\t\tc = Context({'login': formulario, 'list_users':lista, 'accesible': accesible})\n\n\telse:\n\n\t\tinit = Aparcamiento.objects.all()\n\n\t\tif len(init) == 0:\n\t\t\tget_data()\n\n\n\t\tc = Context({'login': formulario, 'list':list_megustas, 'list_users':users, 'accesible': accesible})\n\n\trenderizado = template.render(c)\n\treturn HttpResponse(renderizado)\n\ndef mylogout(request):\n\tlogout(request)\n\treturn redirect(\"/\")\n\n@csrf_exempt\ndef usuarios(request, peticion):\n\n\tformulario = '<form action=\"\" method=\"POST\">'\n\tformulario += '<br>Introduzca un título nuevo a su página personal<br><input type=\"text\" name=\"Titulo\">'\n\tformulario += '<input type=\"submit\" value=\" Actualizar\"></form>'\n\n\tcss = '<form action=\"\" method=\"POST\">'\n\tcss += 'Modifique el tamaño de letra<br><input type=\"text\" name=\"Letra\">'\n\tcss += '<br><br>Modifique el color de letra\t<input type=\"color\" name=\"Color\"><br>'\n\tcss += '<br><input type=\"submit\" value=\"Modificar\"></form>'\n\n\n\taparcamientos = Aparcamiento.objects.all()\n\n\tlista= \"<br>LISTADO DE APARCAMIENTOS<br><br>\"\n\tfor aparcamiento in aparcamientos:\n\t\tnombre_aparcamiento = aparcamiento.nombre\n\t\tlista += nombre_aparcamiento\n\t\tlista += '<form action=\"\" method=\"POST\">'\n\t\tlista += '<button type=\"submit\" name=\"Seleccionar\" value=\"' + nombre_aparcamiento + 
'\">Seleccionar</button><br></form>'\n\n\tuser_object= User.objects.get(username=peticion)\n\n\tif request.method == 'POST':\n\t\tkey = request.body.decode(\"utf-8\").split('=')[0]\n\t\tif key == \"Titulo\":\n\t\t\ttitulo = request.POST['Titulo']\n\t\t\ttry:\n\t\t\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\t\t\tuser.titulo_pagina = titulo\n\t\t\t\tuser.save()\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\tp = Usuario(nombre=user_object, titulo_pagina=titulo)\n\t\t\t\tp.save()\n\n\t\telif key == \"Seleccionar\":\n\t\t\tnombre_aparcamiento = request.POST['Seleccionar']\n\t\t\ttoday = datetime.datetime.today()\n\n\n\t\t\ttry:\n\t\t\t\tselector = Usuario.objects.get(nombre=user_object)\n\t\t\t\taparcamiento = Aparcamiento.objects.get(nombre=nombre_aparcamiento)\n\t\t\texcept:\n\t\t\t\tp = Usuario(nombre=user_object)\n\t\t\t\tp.save()\n\t\t\t\tselector = Usuario.objects.get(nombre=user_object)\n\n\n\t\t\tCheck = False\n\t\t\tlista_usuario = Seleccionados.objects.filter(selector=selector)\n\t\t\tfor i in lista_usuario:\n\t\t\t\tif\tnombre_aparcamiento == i.aparcamiento.nombre:\n\t\t\t\t\tCheck=True\n\n\t\t\tif Check == False:\n\t\t\t\tp = Seleccionados(aparcamiento=aparcamiento, selector=selector, fecha_seleccion=today)\n\t\t\t\tp.save()\n\n\t\telif key == \"Letra\":\n\t\t\tletra = request.POST['Letra']\n\t\t\tcolor = request.POST['Color']\n\n\t\t\ttry:\n\t\t\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\t\texcept:\n\t\t\t\tp = Usuario(nombre=user_object)\n\t\t\t\tp.save()\n\t\t\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\t\tif letra == \"\":\n\t\t\t\tletra = \"15\"\n\n\t\t\tuser.letra = letra\n\t\t\tuser.color = color\n\t\t\tuser.save()\n\n\tlista_seleccionados, seleccionados= aparcamientos_seleccionados(peticion,request)\n\n\n\tif request.user.is_authenticated():\n\t\tusername = str(request.user)\n\t\tif peticion != username: #Si no es igual es que solo puedo acceder a la parte publica, ya qu eno es la mia\n\t\t\ttemplate = 
get_template(\"publicuser.html\")\n\t\t\ttitulo_pagina = \"Página pública de \" + peticion + \"<br><br>\"\n\t\t\tform_user = 'Bienvenido ' + username\n\t\t\tform_user += '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>'\n\t\t\tc = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})\n\t\telse:\t #Si es igual es que es la mia y puedo acceder a la parte privada, ya que es lamia\n\t\t\ttemplate = get_template(\"privateuser.html\")\n\t\t\ttry:\n\t\t\t\ttitulo_pagina = Usuario.objects.get(nombre=user_object).titulo_pagina\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\ttitulo_pagina = \"Página personal de \" + str(request.user) + \"<br><br>\"\n\t\t\tc = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'lista': lista, 'form': formulario, 'css':css, 'titulo': titulo_pagina})\n\telse:\n\t\ttemplate = get_template(\"publicuser.html\")\n\t\ttitulo_pagina = \"Página pública de \" + peticion + \"<br><br>\"\n\t\tform_user = 'Para loguearse vaya al botón de Inicio'\n\t\tc = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})\n\n\n\trenderizado = template.render(c)\n\treturn HttpResponse(renderizado)\n\ndef personalizar(request):\n\tif request.user.is_authenticated():\n\t\tuser_object = User.objects.get(username=request.user)\n\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\tletra = user.letra\n\t\tcolor = user.color\n\telse:\n\t\tletra = \"14px\"\n\t\tcolor = \"#FCFCFC\"\n\n\tcss = get_template(\"change.css\")\n\tc = Context({'letra':letra, 'color':color})\n\trenderizado = css.render(c)\n\n\treturn HttpResponse(renderizado, content_type=\"text/css\")\n\ndef usuarios_xml(request, peticion):\n\n\tuser_object = User.objects.get(username=peticion)\n\n\tdoc = Document()\n\tcont = doc.createElement(\"Contenidos\")\n\tdoc.appendChild(cont)\n\tinfo = 
doc.createElement(\"infoDataset\")\n\tcont.appendChild(info)\n\tnombre = doc.createElement(\"Nombre\")\n\tinfo.appendChild(nombre)\n\tptext = doc.createTextNode(\"XML de aparcamientos seleccionados por el usuario \" + peticion)\n\tnombre.appendChild(ptext)\n\turl = doc.createElement(\"url\")\n\tinfo.appendChild(url)\n\tptext = doc.createTextNode(\"http://localhost:1234/\" + peticion + \"/xml/\")\n\turl.appendChild(ptext)\n\taparc = doc.createElement(\"Aparcamientos\")\n\tcont.appendChild(aparc)\n\n\ttry:\n\t\tusuario = Usuario.objects.get(nombre=user_object)\n\t\tlista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n\n\n\t\tfor i in lista_seleccionados:\n\t\t\titem = doc.createElement(\"Contenido\")\n\t\t\taparc.appendChild(item)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"ID-ENTIDAD\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.entidad)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"NOMBRE\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.nombre)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"DESCRIPCION\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.descripcion)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"ACCESIBILIDAD\")\n\t\t\tif i.aparcamiento.accesibilidad == True:\n\t\t\t\tacces = 1\n\t\t\telse:\n\t\t\t\tacces = 0\n\t\t\tptext = doc.createTextNode(str(acces))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"CONTENT_URL\")\n\t\t\tptext = 
doc.createTextNode(i.aparcamiento.content_url)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"LOCALIZACION\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.localizacion)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"CLASE VIAL\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.clase_vial)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"TIPO NUM\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.tipo_num)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"NUM\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.num))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"LOCALIDAD\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.localidad)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"PROVINCIA\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.provincia)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"CODIGO POSTAL\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.codigo_postal))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"BARRIO\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.barrio)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = 
doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"DISTRITO\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.distrito)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"COORDENADA X\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.coordenada_x))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"COORDENADA Y\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.coordenada_y))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tdatos = doc.createElement(\"DATOSDECONTACTO\")\n\t\t\titem.appendChild(datos)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\tdatos.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"TELEFONO\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.telefono)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\tdatos.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"EMAIL\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.email)\n\t\t\tatributo.appendChild(ptext)\n\texcept:\n\t\tprint(\"\")\n\n\n\txml = doc.toprettyxml(indent=\" \")\n\treturn HttpResponse(xml, content_type = \"text/xml\")\n\n@csrf_exempt\ndef aparcamientos(request):\n\n\tlista = lista_aparcamientos()\n\n\tfiltrar = '<form action=\"\" method=\"POST\">'\n\tfiltrar += '<br><br><input type=\"text\" name=\"distrito\">'\n\tfiltrar += '<input type=\"submit\" value=\"Filtrar por distrito\">'\n\n\ttemplate = get_template(\"aparcamientos.html\")\n\n\tif request.user.is_authenticated():\n\t\tusername = str(request.user)\n\t\tform_user = 'Bienvenido ' + username\n\t\tform_user += '<br><br><a href=\"http://localhost:1234/logout\" > Logout 
</a>'\n\telse:\n\t\tform_user = \"Para loguearse vaya al botón de Inicio\"\n\n\tif request.method == \"POST\":\n\t\tfiltro_distrito = request.POST['distrito']\n\t\tfiltro_distrito = filtro_distrito.upper()\n\n\t\tif filtro_distrito == '':\n\t\t\tlista_filtrada = \"No ha introducido ningún filtro, introduzca distrito para filtrar \" + lista\n\t\telse:\n\t\t\taparcamientos_filtrados = Aparcamiento.objects.all()\n\t\t\tEncontrado = False\n\t\t\tlista_filtrada = \"Los aparcamientos en el \" + filtro_distrito + \" son: \"\n\t\t\tfor i in aparcamientos_filtrados:\n\t\t\t\tif filtro_distrito == i.distrito:\n\t\t\t\t\tEncontrado = True\n\t\t\t\t\tnombre_aparcamiento = i.nombre\n\t\t\t\t\turl_aparcamiento = i.content_url\n\t\t\t\t\tlista_filtrada += \"<p>\" + nombre_aparcamiento + \"</p><li><a href=\" + url_aparcamiento + \">\" + url_aparcamiento + \"</a></li>\"\n\n\n\t\t\tif Encontrado == False:\t\t#No es un distrito válido el que se ha introducido y no ha entrado por el bucle anterior\n\t\t\t\tlista_filtrada = \"Introduzca un nuevo distrito. 
\" + filtro_distrito + \" no es válido\"\n\n\n\t\tc = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':form_user})\n\n\telse:\n\n\t\tc = Context({'distrito': filtrar, 'lista': lista, 'login':form_user})\n\n\n\trenderizado = template.render(c)\n\treturn HttpResponse(renderizado)\n\n@csrf_exempt\ndef aparcamientos_id(request, recurso):\n\n template = get_template(\"aparcamientos.html\")\n num_megustas = 0\n\n if request.method == 'POST':\n key = request.body.decode(\"utf-8\").split('=')[0]\n print(key)\n\n #tipo = request.POST\n #print(tipo)\n #qd = urllib.unquote(tipo).decode(\"utf-8\")\n #qd = QueryDict(tipo).decode(\"utf-8\")\n #qd.getlist('Me Gusta')\n #print(qd)\n if key == 'Me+Gusta':\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1\n aparcamiento.save()\n num_megustas = aparcamiento.contador_megusta\n else:\n coment = request.POST['Comentario']\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_coments = aparcamiento.contador_coments + 1\n aparcamiento.save()\n\n p = Comentario (aparcamiento= aparcamiento, coment=coment)\n p.save()\n\n\n\n try:\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n\n nombre = aparcamiento.nombre\n descripcion = aparcamiento.descripcion\n accesibilidad = aparcamiento.accesibilidad\n localizacion = aparcamiento.localizacion\n via = aparcamiento.clase_vial\n num = aparcamiento.num\n localidad = aparcamiento.localidad\n provincia = aparcamiento.provincia\n codigo_postal = aparcamiento.codigo_postal\n barrio = aparcamiento.barrio\n distrito = aparcamiento.distrito\n coordenada_x = aparcamiento.coordenada_x\n coordenada_y = aparcamiento.coordenada_y\n telefono = aparcamiento.telefono\n email = aparcamiento.email\n\n if telefono == '':\n telefono = \"No disponible\"\n\n if email == '':\n email = \"No disponible\"\n\n if accesibilidad == 1:\n acces = \"Libre\"\n else:\n acces = \"Ocupado\"\n\n 
lista_aparcamientos = Aparcamiento.objects.all()\n list_coments = \"\"\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n num_megustas = aparcamiento.contador_megusta\n for i in lista_aparcamientos:\n if i.entidad == recurso:\n comentarios = Comentario.objects.filter(aparcamiento=i)\n if len(comentarios) != 0:\n list_coments = \"<li><p>COMENTARIOS</p><ol>\"\n for j in comentarios:\n list_coments += \"<li>\" + j.coment + \"<br>\"\n\n Response = \"<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: \" + recurso + \"</br></p>\"\n Response += \"<a href=\" + i.content_url + \">\" + i.nombre + \"</a><br>\"\n Response += \"Descripción: \" + descripcion + \"</br>\"\n Response += \"Accesibilidad: \" + acces + \"</br>\"\n Response += \"Localización: \" + via + \" \" + localizacion + \", nº \" + str(num)\n Response += \" \" + localidad + \" (\" + str(codigo_postal) + \")</br>\"\n Response += \"Ubicación: \" + barrio + \" \" + distrito + \" Coordenadas: \" + str(coordenada_x) + \" , \" + str(coordenada_y) + \"<br><br>\"\n Response += \"INFORMACIÓN DE CONTACTO </br>\"\n Response += \"Teléfono: \" + telefono + \"</br>\"\n Response += \"Email: \" + email + \"</br>\" + list_coments + \"</ol>\"\n if num_megustas != 0:\n Response += \"</br><li>Numero de me gustas es: \" + str(num_megustas) + \"<br>\"\n else:\n Response += \"</br><li>Se el primero en indicar que te gusta la página<br>\"\n\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>'\n\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += '<br>Puede introducir un comentario si lo desea ' + str(request.user) + '<br><input type=\"text\" name=\"Comentario\">'\n formulario += '<input type=\"submit\" value=\"Comentar\"></form>'\n Response += formulario\n\n else:\n form_user = \"Para loguearse vaya al botón de Inicio\"\n\n megusta = ''\n megusta += '<br> Indica que te gusta este 
aparcamiento</br>'\n megusta += '<form action=\"\" method=\"POST\">'\n megusta += '<button type=\"submit\" name=\"Me Gusta\" value=\"Me Gusta\"> +1 </button></form>'\n Response += megusta\n\n except ObjectDoesNotExist:\n Response = \"Este id no se corresponde con ningún aparcamiento\"\n\n c = Context({'lista': Response, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\ndef about(request):\n\n template = get_template(\"about.html\")\n\n Cuerpo = \"DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>\"\n Cuerpo += \"------------------------------------ Página principal ---------------------------------------------------\"\n Cuerpo += \"<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>\"\n Cuerpo += \"<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>\"\n Cuerpo += \"<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. 
Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>\"\n Cuerpo += \"<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>\"\n Cuerpo += \"------------------------------------ Página con los aparcamientos ---------------------------------------------------\"\n Cuerpo += \"<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>\"\n Cuerpo += \"<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>\"\n Cuerpo += \"<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>\"\n Cuerpo += \"------------------------------------ Interfaz pública de usuario ---------------------------------------------------\"\n Cuerpo += \"<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>\"\n Cuerpo += \"<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>\"\n Cuerpo += \"------------------------------------ Interfaz privada de usuario ---------------------------------------------------\"\n Cuerpo += \"<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>\"\n Cuerpo += \"<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. 
Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>\"\n Cuerpo += \"<li> Formulario para cambiar el título de su página personal.</li>\"\n Cuerpo += \"<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>\"\n Cuerpo += \"<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>\"\n Cuerpo += \"------------------------------------ Pie de pagina ---------------------------------------------------\"\n Cuerpo += \"<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>\"\n Cuerpo += \"<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>\"\n Cuerpo += \"------------------------------------ Página XML de un usuario ---------------------------------------------------\"\n Cuerpo += \"<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>\"\n\n c = Context({'lista': Cuerpo})\n renderizado = template.render(c)\n\n return HttpResponse(renderizado)\n",
"step-ids": [
8,
12,
13,
14,
17
]
}
|
[
8,
12,
13,
14,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,
                lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
    """Return match boolean and match score.

    Sublime-Text-style fuzzy scoring: every character of ``pattern`` must
    appear, in order, somewhere in ``instring``; bonuses and penalties
    shape the score so that "better looking" matches score higher.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :param int adj_bonus: bonus for adjacent matches
    :param int sep_bonus: bonus if match occurs after a separator
    :param int camel_bonus: bonus if match is uppercase
    :param int lead_penalty: penalty applied for each letter before 1st match
    :param int max_lead_penalty: maximum total ``lead_penalty``
    :param int unmatched_penalty: penalty for each unmatched letter
    :return: 2-tuple with match truthiness at idx 0 and score at idx 1
    :rtype: ``tuple``
    """
    score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
    prev_match, prev_lower = False, False
    prev_sep = True  # start of string counts as "after a separator"
    # Best candidate source letter for the current pattern character; its
    # score is only committed once the match advances past it, so a later,
    # higher-scoring occurrence of the same letter can replace it.
    best_letter, best_lower = None, None
    best_letter_score = 0
    while s_idx != s_len:
        p_char = pattern[p_idx] if p_idx != p_len else None
        s_char = instring[s_idx]
        p_lower = p_char.lower() if p_char else None
        s_lower, s_upper = s_char.lower(), s_char.upper()
        next_match = p_char and p_lower == s_lower
        rematch = best_letter and best_lower == s_lower
        advanced = next_match and best_letter
        p_repeat = best_letter and p_char and best_lower == p_lower
        if advanced or p_repeat:
            # Commit the pending best letter before moving on.
            score += best_letter_score
            best_letter, best_lower = None, None
            best_letter_score = 0
        if next_match or rematch:
            new_score = 0
            if p_idx == 0:
                # Penalty for letters skipped before the first match, capped.
                score += max(s_idx * lead_penalty, max_lead_penalty)
            if prev_match:
                new_score += adj_bonus
            if prev_sep:
                new_score += sep_bonus
            if prev_lower and s_char == s_upper and s_lower != s_upper:
                new_score += camel_bonus
            if next_match:
                p_idx += 1
            if new_score >= best_letter_score:
                # Replacing a previously held candidate costs a penalty.
                if best_letter is not None:
                    score += unmatched_penalty
                best_letter = s_char
                best_lower = best_letter.lower()
                best_letter_score = new_score
            prev_match = True
        else:
            score += unmatched_penalty
            prev_match = False
        prev_lower = s_char == s_lower and s_lower != s_upper
        prev_sep = s_char in '_ '
        s_idx += 1
    if best_letter:
        # Commit the final pending candidate.
        score += best_letter_score
    return p_idx == p_len, score
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fuzzy_match_simple(pattern, instring):
    """Return True if each character in pattern is found in order in instring.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :return: True if there is a match, False otherwise
    :rtype: ``bool``
    """
    # An empty pattern or an empty search string never matches.
    if not pattern or not instring:
        return False
    # Case-insensitive subsequence test: the shared iterator advances
    # through instring, so each pattern char must be found after the last.
    source = iter(instring)
    return all(any(p.lower() == c.lower() for c in source) for p in pattern)
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,
                lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
    """Return match boolean and match score.

    Sublime-Text-style fuzzy scoring: every character of ``pattern`` must
    appear, in order, somewhere in ``instring``; bonuses and penalties
    shape the score so that "better looking" matches score higher.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :param int adj_bonus: bonus for adjacent matches
    :param int sep_bonus: bonus if match occurs after a separator
    :param int camel_bonus: bonus if match is uppercase
    :param int lead_penalty: penalty applied for each letter before 1st match
    :param int max_lead_penalty: maximum total ``lead_penalty``
    :param int unmatched_penalty: penalty for each unmatched letter
    :return: 2-tuple with match truthiness at idx 0 and score at idx 1
    :rtype: ``tuple``
    """
    score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
    prev_match, prev_lower = False, False
    prev_sep = True  # start of string counts as "after a separator"
    # Best candidate source letter for the current pattern character; its
    # score is only committed once the match advances past it, so a later,
    # higher-scoring occurrence of the same letter can replace it.
    best_letter, best_lower = None, None
    best_letter_score = 0
    while s_idx != s_len:
        p_char = pattern[p_idx] if p_idx != p_len else None
        s_char = instring[s_idx]
        p_lower = p_char.lower() if p_char else None
        s_lower, s_upper = s_char.lower(), s_char.upper()
        next_match = p_char and p_lower == s_lower
        rematch = best_letter and best_lower == s_lower
        advanced = next_match and best_letter
        p_repeat = best_letter and p_char and best_lower == p_lower
        if advanced or p_repeat:
            # Commit the pending best letter before moving on.
            score += best_letter_score
            best_letter, best_lower = None, None
            best_letter_score = 0
        if next_match or rematch:
            new_score = 0
            if p_idx == 0:
                # Penalty for letters skipped before the first match, capped.
                score += max(s_idx * lead_penalty, max_lead_penalty)
            if prev_match:
                new_score += adj_bonus
            if prev_sep:
                new_score += sep_bonus
            if prev_lower and s_char == s_upper and s_lower != s_upper:
                new_score += camel_bonus
            if next_match:
                p_idx += 1
            if new_score >= best_letter_score:
                # Replacing a previously held candidate costs a penalty.
                if best_letter is not None:
                    score += unmatched_penalty
                best_letter = s_char
                best_lower = best_letter.lower()
                best_letter_score = new_score
            prev_match = True
        else:
            score += unmatched_penalty
            prev_match = False
        prev_lower = s_char == s_lower and s_lower != s_upper
        prev_sep = s_char in '_ '
        s_idx += 1
    if best_letter:
        # Commit the final pending candidate.
        score += best_letter_score
    return p_idx == p_len, score
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import unicode_literals
def fuzzy_match_simple(pattern, instring):
    """Return True if each character in pattern is found in order in instring.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :return: True if there is a match, False otherwise
    :rtype: ``bool``
    """
    # Walk instring once, consuming the front of the pattern each time the
    # next wanted character appears (case-insensitively).
    remaining = pattern
    for ch in instring:
        if remaining and remaining[0].lower() == ch.lower():
            remaining = remaining[1:]
    # Both inputs must be non-empty and the whole pattern consumed.
    return bool(pattern) and bool(instring) and not remaining
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=
    10, lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
    """Return match boolean and match score.
    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :param int adj_bonus: bonus for adjacent matches
    :param int sep_bonus: bonus if match occurs after a separator
    :param int camel_bonus: bonus if match is uppercase
    :param int lead_penalty: penalty applied for each letter before 1st match
    :param int max_lead_penalty: maximum total ``lead_penalty``
    :param int unmatched_penalty: penalty for each unmatched letter
    :return: 2-tuple with match truthiness at idx 0 and score at idx 1
    :rtype: ``tuple``
    """
    score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
    prev_match, prev_lower = False, False
    # start True so a match on the very first letter earns sep_bonus
    prev_sep = True
    # "best letter" is a tentative match for the current pattern char; it is
    # banked into score/matched_indices only once the match is committed
    best_letter, best_lower, best_letter_idx = None, None, None
    best_letter_score = 0
    # NOTE(review): matched_indices is collected but never returned
    matched_indices = []
    while s_idx != s_len:
        p_char = pattern[p_idx] if p_idx != p_len else None
        s_char = instring[s_idx]
        p_lower = p_char.lower() if p_char else None
        s_lower, s_upper = s_char.lower(), s_char.upper()
        # does s_char match the next pattern char / the banked best letter?
        next_match = p_char and p_lower == s_lower
        rematch = best_letter and best_lower == s_lower
        advanced = next_match and best_letter
        p_repeat = best_letter and p_char and best_lower == p_lower
        if advanced or p_repeat:
            # commit the tentative best letter before moving on
            score += best_letter_score
            matched_indices.append(best_letter_idx)
            best_letter, best_lower, best_letter_idx = None, None, None
            best_letter_score = 0
        if next_match or rematch:
            new_score = 0
            # penalty for letters before the first match; max() because the
            # penalties are negative, so max keeps the smaller magnitude
            if p_idx == 0:
                score += max(s_idx * lead_penalty, max_lead_penalty)
            # bonus for consecutive matches
            if prev_match:
                new_score += adj_bonus
            # bonus for a match right after a separator
            if prev_sep:
                new_score += sep_bonus
            # bonus across a camelCase boundary (lowercase -> uppercase)
            if prev_lower and s_char == s_upper and s_lower != s_upper:
                new_score += camel_bonus
            # advance the pattern only when the next pattern letter matched
            if next_match:
                p_idx += 1
            # keep the better of the tentative matches (next or rematch)
            if new_score >= best_letter_score:
                # the previously-banked letter is now skipped: penalize it
                if best_letter is not None:
                    score += unmatched_penalty
                best_letter = s_char
                best_lower = best_letter.lower()
                best_letter_idx = s_idx
                best_letter_score = new_score
            prev_match = True
        else:
            score += unmatched_penalty
            prev_match = False
        prev_lower = s_char == s_lower and s_lower != s_upper
        prev_sep = s_char in '_ '
        s_idx += 1
    # flush a still-pending best letter at end of string
    if best_letter:
        score += best_letter_score
        matched_indices.append(best_letter_idx)
    return p_idx == p_len, score
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Matt Menzenski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
def fuzzy_match_simple(pattern, instring):
    """Return True if each character in pattern is found in order in instring.
    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :return: True if there is a match, False otherwise
    :rtype: ``bool``
    """
    # Empty pattern or empty instring never match, as in the index version.
    if not pattern or not instring:
        return False
    # Shared iterator: each pattern char consumes instring up to (and
    # including) its case-insensitive occurrence, enforcing order.
    chars = iter(instring)
    return all(any(p.lower() == c.lower() for c in chars) for p in pattern)
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,
                lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
    """Return match boolean and match score.
    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :param int adj_bonus: bonus for adjacent matches
    :param int sep_bonus: bonus if match occurs after a separator
    :param int camel_bonus: bonus if match is uppercase
    :param int lead_penalty: penalty applied for each letter before 1st match
    :param int max_lead_penalty: maximum total ``lead_penalty``
    :param int unmatched_penalty: penalty for each unmatched letter
    :return: 2-tuple with match truthiness at idx 0 and score at idx 1
    :rtype: ``tuple``
    """
    score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
    prev_match, prev_lower = False, False
    prev_sep = True  # so that matching first letter gets sep_bonus
    # "best letter" is a tentative match for the current pattern char; it is
    # banked into score/matched_indices only once the match is committed
    best_letter, best_lower, best_letter_idx = None, None, None
    best_letter_score = 0
    # NOTE(review): matched_indices is collected but never returned
    matched_indices = []
    while s_idx != s_len:
        p_char = pattern[p_idx] if (p_idx != p_len) else None
        s_char = instring[s_idx]
        p_lower = p_char.lower() if p_char else None
        s_lower, s_upper = s_char.lower(), s_char.upper()
        next_match = p_char and p_lower == s_lower
        rematch = best_letter and best_lower == s_lower
        advanced = next_match and best_letter
        p_repeat = best_letter and p_char and best_lower == p_lower
        if advanced or p_repeat:
            # commit the tentative best letter before moving on
            score += best_letter_score
            matched_indices.append(best_letter_idx)
            best_letter, best_lower, best_letter_idx = None, None, None
            best_letter_score = 0
        if next_match or rematch:
            new_score = 0
            # apply penalty for each letter before the first match
            # using max because penalties are negative (so max = smallest)
            if p_idx == 0:
                score += max(s_idx * lead_penalty, max_lead_penalty)
            # apply bonus for consecutive matches
            if prev_match:
                new_score += adj_bonus
            # apply bonus for matches after a separator
            if prev_sep:
                new_score += sep_bonus
            # apply bonus across camelCase boundaries
            if prev_lower and s_char == s_upper and s_lower != s_upper:
                new_score += camel_bonus
            # update pattern index iff the next pattern letter was matched
            if next_match:
                p_idx += 1
            # update best letter match (may be next or rematch)
            if new_score >= best_letter_score:
                # apply penalty for now-skipped letter
                if best_letter is not None:
                    score += unmatched_penalty
                best_letter = s_char
                best_lower = best_letter.lower()
                best_letter_idx = s_idx
                best_letter_score = new_score
            prev_match = True
        else:
            score += unmatched_penalty
            prev_match = False
        prev_lower = s_char == s_lower and s_lower != s_upper
        prev_sep = s_char in '_ '
        s_idx += 1
    # flush a still-pending best letter at end of string
    if best_letter:
        score += best_letter_score
        matched_indices.append(best_letter_idx)
    return p_idx == p_len, score
|
flexible
|
{
"blob_id": "576bb15ad081cd368265c98875be5d032cdafd22",
"index": 4789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=\n 10, lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n while s_idx != s_len:\n p_char = pattern[p_idx] if p_idx != p_len else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n if next_match or rematch:\n new_score = 0\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n if prev_match:\n new_score += adj_bonus\n if prev_sep:\n new_score += sep_bonus\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n if next_match:\n p_idx += 1\n if 
new_score >= best_letter_score:\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n prev_match = True\n else:\n score += unmatched_penalty\n prev_match = False\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n s_idx += 1\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n return p_idx == p_len, score\n",
"step-3": "<mask token>\n\n\ndef fuzzy_match_simple(pattern, instring):\n \"\"\"Return True if each character in pattern is found in order in instring.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n\n :return: True if there is a match, False otherwise\n :rtype: ``bool``\n \"\"\"\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while p_idx != p_len and s_idx != s_len:\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=\n 10, lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n while s_idx != s_len:\n p_char = pattern[p_idx] if p_idx != p_len else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n 
advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n if next_match or rematch:\n new_score = 0\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n if prev_match:\n new_score += adj_bonus\n if prev_sep:\n new_score += sep_bonus\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n if next_match:\n p_idx += 1\n if new_score >= best_letter_score:\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n prev_match = True\n else:\n score += unmatched_penalty\n prev_match = False\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n s_idx += 1\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n return p_idx == p_len, score\n",
"step-4": "<mask token>\nfrom __future__ import unicode_literals\n\n\ndef fuzzy_match_simple(pattern, instring):\n \"\"\"Return True if each character in pattern is found in order in instring.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n\n :return: True if there is a match, False otherwise\n :rtype: ``bool``\n \"\"\"\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while p_idx != p_len and s_idx != s_len:\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=\n 10, lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n while s_idx != s_len:\n p_char = pattern[p_idx] if p_idx != p_len else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n next_match = p_char and p_lower == s_lower\n rematch 
= best_letter and best_lower == s_lower\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n if next_match or rematch:\n new_score = 0\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n if prev_match:\n new_score += adj_bonus\n if prev_sep:\n new_score += sep_bonus\n if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n if next_match:\n p_idx += 1\n if new_score >= best_letter_score:\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n prev_match = True\n else:\n score += unmatched_penalty\n prev_match = False\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n s_idx += 1\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n return p_idx == p_len, score\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMIT License\n\nCopyright (c) 2016 Matt Menzenski\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\n\ndef fuzzy_match_simple(pattern, instring):\n \"\"\"Return True if each character in pattern is found in order in instring.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n\n :return: True if there is a match, False otherwise\n :rtype: ``bool``\n \"\"\"\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while (p_idx != p_len) and (s_idx != s_len):\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len\n\n\ndef fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,\n lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):\n \"\"\"Return match 
boolean and match score.\n\n :param pattern: the pattern to be matched\n :type pattern: ``str``\n :param instring: the containing string to search against\n :type instring: ``str``\n :param int adj_bonus: bonus for adjacent matches\n :param int sep_bonus: bonus if match occurs after a separator\n :param int camel_bonus: bonus if match is uppercase\n :param int lead_penalty: penalty applied for each letter before 1st match\n :param int max_lead_penalty: maximum total ``lead_penalty``\n :param int unmatched_penalty: penalty for each unmatched letter\n\n :return: 2-tuple with match truthiness at idx 0 and score at idx 1\n :rtype: ``tuple``\n \"\"\"\n score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)\n prev_match, prev_lower = False, False\n prev_sep = True # so that matching first letter gets sep_bonus\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n matched_indices = []\n\n while s_idx != s_len:\n p_char = pattern[p_idx] if (p_idx != p_len) else None\n s_char = instring[s_idx]\n p_lower = p_char.lower() if p_char else None\n s_lower, s_upper = s_char.lower(), s_char.upper()\n\n next_match = p_char and p_lower == s_lower\n rematch = best_letter and best_lower == s_lower\n\n advanced = next_match and best_letter\n p_repeat = best_letter and p_char and best_lower == p_lower\n\n if advanced or p_repeat:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n best_letter, best_lower, best_letter_idx = None, None, None\n best_letter_score = 0\n\n if next_match or rematch:\n new_score = 0\n\n # apply penalty for each letter before the first match\n # using max because penalties are negative (so max = smallest)\n if p_idx == 0:\n score += max(s_idx * lead_penalty, max_lead_penalty)\n\n # apply bonus for consecutive matches\n if prev_match:\n new_score += adj_bonus\n\n # apply bonus for matches after a separator\n if prev_sep:\n new_score += sep_bonus\n\n # apply bonus across camelCase boundaries\n 
if prev_lower and s_char == s_upper and s_lower != s_upper:\n new_score += camel_bonus\n\n # update pattern index iff the next pattern letter was matched\n if next_match:\n p_idx += 1\n\n # update best letter match (may be next or rematch)\n if new_score >= best_letter_score:\n # apply penalty for now-skipped letter\n if best_letter is not None:\n score += unmatched_penalty\n best_letter = s_char\n best_lower = best_letter.lower()\n best_letter_idx = s_idx\n best_letter_score = new_score\n\n prev_match = True\n\n else:\n score += unmatched_penalty\n prev_match = False\n\n prev_lower = s_char == s_lower and s_lower != s_upper\n prev_sep = s_char in '_ '\n\n s_idx += 1\n\n if best_letter:\n score += best_letter_score\n matched_indices.append(best_letter_idx)\n\n return p_idx == p_len, score",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Group the per-window / per-mark output columns so the repetitive threshold
# logic below can run as loops instead of dozens of copied if/else blocks.
# The column lists themselves are the module-level lists defined above.
_white_windows = [white_5_improve, white_10_improve, white_15_improve,
                  white_20_improve, white_25_improve, white_30_improve,
                  white_35_improve, white_40_improve, white_45_improve,
                  white_50_improve, white_55_improve, white_60_improve,
                  white_65_improve, white_70_improve, white_75_improve]
_black_windows = [black_5_improve, black_10_improve, black_15_improve,
                  black_20_improve, black_25_improve, black_30_improve,
                  black_35_improve, black_40_improve, black_45_improve,
                  black_50_improve, black_55_improve, black_60_improve,
                  black_65_improve, black_70_improve, black_75_improve]
_score_marks = [game_score10, game_score20, game_score30, game_score40,
                game_score50, game_score60, game_score70, game_score80,
                game_score90, game_score100]
for game in raw_scores:
    # +1 because nums/white_delta/black_delta are seeded with a leading 0
    n_moves = len(game) + 1
    total = 0
    prev = None
    player = 1  # 1 -> next recorded delta belongs to black, 0 -> white
    hi, lo = -100, 100
    worst_drop = best_gain = 0
    white_delta = [0]
    black_delta = [0]
    nums = [0]
    for raw in game:
        # unevaluated ply: skipped entirely (no player toggle), as before
        if raw == 'NA':
            continue
        val = int(raw)
        nums.append(val)
        total += val
        if prev is not None:
            diff = val - prev
            worst_drop = min(worst_drop, diff)
            best_gain = max(best_gain, diff)
            (black_delta if player == 1 else white_delta).append(diff)
        player = 1 - player
        prev = val
        hi = max(hi, val)
        lo = min(lo, val)
    w_avg = sum(white_delta) / (n_moves / 2)
    b_avg = sum(black_delta) / (n_moves / 2)
    nw, nb = len(white_delta), len(black_delta)
    wq, bq = nw // 4, nb // 4
    game_length.append(n_moves)
    average_score.append(total / n_moves)
    score_stdev.append(np.std(np.array(nums)))
    largest_gain.append(best_gain)
    largest_drop.append(worst_drop)
    max_score.append(hi)
    min_score.append(lo)
    white_avg_improve.append(w_avg)
    black_avg_improve.append(b_avg)
    white_median_improve.append(sorted(white_delta)[nw // 2])
    black_median_improve.append(sorted(black_delta)[nb // 2])
    # NOTE(review): precedence makes each of these (sum/len)//4, not the
    # quarter mean sum/(len//4) the naming suggests; preserved as-is so the
    # generated features stay identical (a "fix" would also risk dividing
    # by zero for very short games).
    white_q1_improve.append(sum(white_delta[:wq]) / nw // 4)
    white_q2_improve.append(sum(white_delta[wq:wq * 2]) / nw // 4)
    white_q3_improve.append(sum(white_delta[wq * 2:wq * 3]) / nw // 4)
    white_q4_improve.append(sum(white_delta[wq * 3:]) / nw // 4)
    black_q1_improve.append(sum(black_delta[:bq]) / nb // 4)
    black_q2_improve.append(sum(black_delta[bq:bq * 2]) / nb // 4)
    black_q3_improve.append(sum(black_delta[bq * 2:bq * 3]) / nb // 4)
    black_q4_improve.append(sum(black_delta[bq * 3:]) / nb // 4)
    # quarter extrema: slices carry one extra trailing element (the +1),
    # exactly as in the original code
    white_q1_max.append(max(white_delta[:wq + 1]))
    white_q2_max.append(max(white_delta[wq:wq * 2 + 1]))
    white_q3_max.append(max(white_delta[wq * 2:wq * 3 + 1]))
    white_q4_max.append(max(white_delta[wq * 3:]))
    black_q1_max.append(max(black_delta[:bq + 1]))
    black_q2_max.append(max(black_delta[bq:bq * 2 + 1]))
    black_q3_max.append(max(black_delta[bq * 2:bq * 3 + 1]))
    black_q4_max.append(max(black_delta[bq * 3:]))
    white_q1_min.append(min(white_delta[:wq + 1]))
    white_q2_min.append(min(white_delta[wq:wq * 2 + 1]))
    white_q3_min.append(min(white_delta[wq * 2:wq * 3 + 1]))
    white_q4_min.append(min(white_delta[wq * 3:]))
    black_q1_min.append(min(black_delta[:bq + 1]))
    black_q2_min.append(min(black_delta[bq:bq * 2 + 1]))
    black_q3_min.append(min(black_delta[bq * 2:bq * 3 + 1]))
    black_q4_min.append(min(black_delta[bq * 3:]))
    # quarter standard deviations (slices without the +1, as original;
    # an empty q1 slice yields numpy's nan-with-warning, also as original)
    white_q1_stdev.append(np.std(np.array(white_delta[:wq])))
    white_q2_stdev.append(np.std(np.array(white_delta[wq:wq * 2])))
    white_q3_stdev.append(np.std(np.array(white_delta[wq * 2:wq * 3])))
    white_q4_stdev.append(np.std(np.array(white_delta[wq * 3:])))
    black_q1_stdev.append(np.std(np.array(black_delta[:bq])))
    black_q2_stdev.append(np.std(np.array(black_delta[bq:bq * 2])))
    black_q3_stdev.append(np.std(np.array(black_delta[bq * 2:bq * 3])))
    black_q4_stdev.append(np.std(np.array(black_delta[bq * 3:])))
    # mean delta over each successive 5-move window (fallback: overall avg)
    for col_w, col_b, end in zip(_white_windows, _black_windows,
                                 range(5, 76, 5)):
        col_w.append(sum(white_delta[end - 5:end]) / 5 if nw >= end else w_avg)
        col_b.append(sum(black_delta[end - 5:end]) / 5 if nb >= end else b_avg)
    # raw score sampled at moves 10, 20, ..., 100 (0 when the game is shorter)
    for col, mark in zip(_score_marks, range(10, 101, 10)):
        col.append(nums[mark] if len(nums) > mark else 0)
    # final evaluated score of the game (0 if none, matching `if prev:`)
    ending_score.append(prev if prev else 0)
<|reserved_special_token_0|>
# Persist the assembled per-game feature table to disk.
# NOTE(review): chess_df is constructed outside this excerpt — presumably a
# pandas DataFrame built from the feature lists above; confirm in full file.
chess_df.to_csv('score_features.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Load the Stockfish evaluation column: one list of per-move score strings
# per game, with the CSV header row dropped ([1:]).
# Fix: the original called open() inline inside the comprehension, leaking
# the file handle until garbage collection; a with-block closes it promptly.
with open('stockfish.csv') as _scores_file:
    raw_scores = [line.strip().split(',')[1].split()
                  for line in _scores_file][1:]
# Output feature columns: each list receives exactly one entry per game in
# the feature-extraction loop below. Tuple-unpacking a generator of fresh
# lists guarantees every name is bound to a distinct list object.
game_length, average_score, score_stdev = [], [], []
largest_gain, largest_drop = [], []
max_score, min_score, ending_score = [], [], []
white_avg_improve, black_avg_improve = [], []
white_median_improve, black_median_improve = [], []
(white_q1_improve, white_q2_improve, white_q3_improve,
 white_q4_improve) = ([] for _ in range(4))
(black_q1_improve, black_q2_improve, black_q3_improve,
 black_q4_improve) = ([] for _ in range(4))
# raw score sampled at moves 10, 20, ..., 100
(game_score10, game_score20, game_score30, game_score40, game_score50,
 game_score60, game_score70, game_score80, game_score90,
 game_score100) = ([] for _ in range(10))
(white_q1_max, white_q2_max, white_q3_max,
 white_q4_max) = ([] for _ in range(4))
(black_q1_max, black_q2_max, black_q3_max,
 black_q4_max) = ([] for _ in range(4))
(white_q1_min, white_q2_min, white_q3_min,
 white_q4_min) = ([] for _ in range(4))
(black_q1_min, black_q2_min, black_q3_min,
 black_q4_min) = ([] for _ in range(4))
(white_q1_stdev, white_q2_stdev, white_q3_stdev,
 white_q4_stdev) = ([] for _ in range(4))
(black_q1_stdev, black_q2_stdev, black_q3_stdev,
 black_q4_stdev) = ([] for _ in range(4))
# mean improvement over successive 5-move windows (moves 5 through 75)
(white_5_improve, white_10_improve, white_15_improve, white_20_improve,
 white_25_improve, white_30_improve, white_35_improve, white_40_improve,
 white_45_improve, white_50_improve, white_55_improve, white_60_improve,
 white_65_improve, white_70_improve,
 white_75_improve) = ([] for _ in range(15))
(black_5_improve, black_10_improve, black_15_improve, black_20_improve,
 black_25_improve, black_30_improve, black_35_improve, black_40_improve,
 black_45_improve, black_50_improve, black_55_improve, black_60_improve,
 black_65_improve, black_70_improve,
 black_75_improve) = ([] for _ in range(15))
# Walk every game, deriving one value per feature list.
for game in raw_scores:
    # +1 keeps game_len >= 1 so later divisions never hit zero on empty games.
    game_len = len(game) + 1
    total = 0
    prev = None
    player = 1  # toggles 0/1; successive scores alternate between players
    max_so_far = -100
    min_so_far = 100
    max_drop = 0
    max_gain = 0
    # Per-player score changes, seeded with 0 so the lists are never empty.
    white_improve = [0]
    black_improve = [0]
    game_nums = [0]
    for score in game:
        if score != 'NA':  # 'NA' marks half-moves without an engine score
            score = int(score)
            game_nums.append(score)
            total += score
            if prev is not None:
                change = score - prev
                max_drop = min(max_drop, change)
                max_gain = max(max_gain, change)
                if player == 1:
                    black_improve.append(change)
                else:
                    white_improve.append(change)
            player = 1 - player
            prev = score
            max_so_far = max(max_so_far, score)
            min_so_far = min(min_so_far, score)
    white_avg = sum(white_improve) / (game_len / 2)
    black_avg = sum(black_improve) / (game_len / 2)
    game_length.append(game_len)
    average_score.append(total / game_len)
    score_stdev.append(np.std(np.array(game_nums)))
    largest_gain.append(max_gain)
    largest_drop.append(max_drop)
    max_score.append(max_so_far)
    min_score.append(min_so_far)
    white_avg_improve.append(white_avg)
    black_avg_improve.append(black_avg)
    white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])
    black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])
    # Quartile statistics over each player's improvement sequence.
    # BUG FIX: the original computed the quartile mean as
    #     sum(slice) / len(seq) // 4
    # which, because / and // have equal precedence and associate left to
    # right, evaluates as (sum(slice) / len(seq)) // 4 -- not the intended
    # sum(slice) / (len(seq) // 4).  max(q, 1) guards the divisor for games
    # shorter than four recorded half-moves.
    for seq, q_avgs, q_maxes, q_mins, q_stds in (
            (white_improve,
             (white_q1_improve, white_q2_improve, white_q3_improve,
              white_q4_improve),
             (white_q1_max, white_q2_max, white_q3_max, white_q4_max),
             (white_q1_min, white_q2_min, white_q3_min, white_q4_min),
             (white_q1_stdev, white_q2_stdev, white_q3_stdev,
              white_q4_stdev)),
            (black_improve,
             (black_q1_improve, black_q2_improve, black_q3_improve,
              black_q4_improve),
             (black_q1_max, black_q2_max, black_q3_max, black_q4_max),
             (black_q1_min, black_q2_min, black_q3_min, black_q4_min),
             (black_q1_stdev, black_q2_stdev, black_q3_stdev,
              black_q4_stdev))):
        q = len(seq) // 4
        # Quartile boundaries; the last quartile absorbs the remainder.
        bounds = ((0, q), (q, 2 * q), (2 * q, 3 * q), (3 * q, len(seq)))
        for i, (lo, hi) in enumerate(bounds):
            q_avgs[i].append(sum(seq[lo:hi]) / max(q, 1))
            # max/min extend the slice by one element (original behaviour)
            # so it is never empty even when q == 0; the final quartile is
            # already guaranteed non-empty by the seed element.
            mm_hi = hi + 1 if i < 3 else hi
            q_maxes[i].append(max(seq[lo:mm_hi]))
            q_mins[i].append(min(seq[lo:mm_hi]))
            # NOTE: when q == 0 this slice is empty and np.std returns nan
            # (with a RuntimeWarning), exactly as the original code did.
            q_stds[i].append(np.std(np.array(seq[lo:hi])))
    # Rolling 5-ply improvement averages; games too short for a given window
    # fall back to the player's overall average.
    for seq, overall_avg, windows in (
            (white_improve, white_avg,
             (white_5_improve, white_10_improve, white_15_improve,
              white_20_improve, white_25_improve, white_30_improve,
              white_35_improve, white_40_improve, white_45_improve,
              white_50_improve, white_55_improve, white_60_improve,
              white_65_improve, white_70_improve, white_75_improve)),
            (black_improve, black_avg,
             (black_5_improve, black_10_improve, black_15_improve,
              black_20_improve, black_25_improve, black_30_improve,
              black_35_improve, black_40_improve, black_45_improve,
              black_50_improve, black_55_improve, black_60_improve,
              black_65_improve, black_70_improve, black_75_improve))):
        for i, dest in enumerate(windows):
            hi = 5 * (i + 1)
            if len(seq) >= hi:
                dest.append(sum(seq[hi - 5:hi]) / 5)
            else:
                dest.append(overall_avg)
    # Score snapshots at fixed half-move numbers (0 when the game is shorter).
    for i, dest in enumerate((game_score10, game_score20, game_score30,
                              game_score40, game_score50, game_score60,
                              game_score70, game_score80, game_score90,
                              game_score100)):
        idx = 10 * (i + 1)
        dest.append(game_nums[idx] if len(game_nums) > idx else 0)
    # Final evaluation; 0 when the game had no usable scores (prev is None)
    # or the last score itself was 0 (preserves original truthiness test).
    ending_score.append(prev if prev else 0)
# Assemble the feature table; dict insertion order fixes the CSV column order.
chess_dict = {
    'game_length': game_length,
    'average_score': average_score,
    'score_stdev': score_stdev,
    'largest_gain': largest_gain,
    'largest_drop': largest_drop,
    'max_score': max_score,
    'min_score': min_score,
    'ending_score': ending_score,
    'white_avg_improve': white_avg_improve,
    'black_avg_improve': black_avg_improve,
    'white_median_improve': white_median_improve,
    'black_median_improve': black_median_improve,
    'white_q1_improve': white_q1_improve,
    'white_q2_improve': white_q2_improve,
    'white_q3_improve': white_q3_improve,
    'white_q4_improve': white_q4_improve,
    'black_q1_improve': black_q1_improve,
    'black_q2_improve': black_q2_improve,
    'black_q3_improve': black_q3_improve,
    'black_q4_improve': black_q4_improve,
    'white_5_improve': white_5_improve,
    'white_10_improve': white_10_improve,
    'white_15_improve': white_15_improve,
    'white_20_improve': white_20_improve,
    'white_25_improve': white_25_improve,
    'white_30_improve': white_30_improve,
    'white_35_improve': white_35_improve,
    'white_40_improve': white_40_improve,
    'white_45_improve': white_45_improve,
    'white_50_improve': white_50_improve,
    'white_55_improve': white_55_improve,
    'white_60_improve': white_60_improve,
    'white_65_improve': white_65_improve,
    'white_70_improve': white_70_improve,
    'white_75_improve': white_75_improve,
    'black_5_improve': black_5_improve,
    'black_10_improve': black_10_improve,
    'black_15_improve': black_15_improve,
    'black_20_improve': black_20_improve,
    'black_25_improve': black_25_improve,
    'black_30_improve': black_30_improve,
    'black_35_improve': black_35_improve,
    'black_40_improve': black_40_improve,
    'black_45_improve': black_45_improve,
    'black_50_improve': black_50_improve,
    'black_55_improve': black_55_improve,
    'black_60_improve': black_60_improve,
    'black_65_improve': black_65_improve,
    'black_70_improve': black_70_improve,
    'black_75_improve': black_75_improve,
    'white_q1_max': white_q1_max,
    'white_q2_max': white_q2_max,
    'white_q3_max': white_q3_max,
    'white_q4_max': white_q4_max,
    'black_q1_max': black_q1_max,
    'black_q2_max': black_q2_max,
    'black_q3_max': black_q3_max,
    'black_q4_max': black_q4_max,
    'white_q1_min': white_q1_min,
    'white_q2_min': white_q2_min,
    'white_q3_min': white_q3_min,
    'white_q4_min': white_q4_min,
    'black_q1_min': black_q1_min,
    'black_q2_min': black_q2_min,
    'black_q3_min': black_q3_min,
    'black_q4_min': black_q4_min,
    'white_q1_stdev': white_q1_stdev,
    'white_q2_stdev': white_q2_stdev,
    'white_q3_stdev': white_q3_stdev,
    'white_q4_stdev': white_q4_stdev,
    'black_q1_stdev': black_q1_stdev,
    'black_q2_stdev': black_q2_stdev,
    'black_q3_stdev': black_q3_stdev,
    'black_q4_stdev': black_q4_stdev,
    'game_score10': game_score10,
    'game_score20': game_score20,
    'game_score30': game_score30,
    'game_score40': game_score40,
    'game_score50': game_score50,
    'game_score60': game_score60,
    'game_score70': game_score70,
    'game_score80': game_score80,
    'game_score90': game_score90,
    'game_score100': game_score100,
}
# BUG FIX: the index length was hard-coded to 50000 rows, which makes pandas
# raise ValueError for any other number of games.  Derive it from the data;
# output is identical for the original 50000-game dataset.
chess_df = pd.DataFrame(chess_dict, index=range(1, len(game_length) + 1))
chess_df.index.name = 'Event'
chess_df.to_csv('score_features.csv')
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
# Load the raw per-move engine evaluations: column 1 of each stockfish.csv
# row holds a space-separated score sequence per game; [1:] drops the header.
# BUG FIX: use a context manager so the file handle is closed deterministically
# (the original bare open() leaked the descriptor).
with open('stockfish.csv') as score_file:
    raw_scores = [line.strip().split(',')[1].split()
                  for line in score_file][1:]
# Per-game feature accumulators -- exactly one entry is appended per game.
(game_length, average_score, score_stdev, largest_gain, largest_drop,
 max_score, min_score, ending_score) = ([] for _ in range(8))
# Average / median per-player improvement.
(white_avg_improve, black_avg_improve, white_median_improve,
 black_median_improve) = ([] for _ in range(4))
# Quartile mean improvements per player.
(white_q1_improve, white_q2_improve, white_q3_improve, white_q4_improve,
 black_q1_improve, black_q2_improve, black_q3_improve,
 black_q4_improve) = ([] for _ in range(8))
# Score snapshots at half-moves 10, 20, ..., 100.
(game_score10, game_score20, game_score30, game_score40, game_score50,
 game_score60, game_score70, game_score80, game_score90,
 game_score100) = ([] for _ in range(10))
# Quartile max / min / stdev of per-player improvements.
(white_q1_max, white_q2_max, white_q3_max, white_q4_max,
 black_q1_max, black_q2_max, black_q3_max, black_q4_max) = (
    [] for _ in range(8))
(white_q1_min, white_q2_min, white_q3_min, white_q4_min,
 black_q1_min, black_q2_min, black_q3_min, black_q4_min) = (
    [] for _ in range(8))
(white_q1_stdev, white_q2_stdev, white_q3_stdev, white_q4_stdev,
 black_q1_stdev, black_q2_stdev, black_q3_stdev, black_q4_stdev) = (
    [] for _ in range(8))
# 5-ply window average improvements (plies 1-5, 6-10, ..., 71-75).
(white_5_improve, white_10_improve, white_15_improve, white_20_improve,
 white_25_improve, white_30_improve, white_35_improve, white_40_improve,
 white_45_improve, white_50_improve, white_55_improve, white_60_improve,
 white_65_improve, white_70_improve, white_75_improve) = (
    [] for _ in range(15))
(black_5_improve, black_10_improve, black_15_improve, black_20_improve,
 black_25_improve, black_30_improve, black_35_improve, black_40_improve,
 black_45_improve, black_50_improve, black_55_improve, black_60_improve,
 black_65_improve, black_70_improve, black_75_improve) = (
    [] for _ in range(15))
# Walk every game, deriving one value per feature list.
for game in raw_scores:
    # +1 keeps game_len >= 1 so later divisions never hit zero on empty games.
    game_len = len(game) + 1
    total = 0
    prev = None
    player = 1  # toggles 0/1; successive scores alternate between players
    max_so_far = -100
    min_so_far = 100
    max_drop = 0
    max_gain = 0
    # Per-player score changes, seeded with 0 so the lists are never empty.
    white_improve = [0]
    black_improve = [0]
    game_nums = [0]
    for score in game:
        if score != 'NA':  # 'NA' marks half-moves without an engine score
            score = int(score)
            game_nums.append(score)
            total += score
            if prev is not None:
                change = score - prev
                max_drop = min(max_drop, change)
                max_gain = max(max_gain, change)
                if player == 1:
                    black_improve.append(change)
                else:
                    white_improve.append(change)
            player = 1 - player
            prev = score
            max_so_far = max(max_so_far, score)
            min_so_far = min(min_so_far, score)
    white_avg = sum(white_improve) / (game_len / 2)
    black_avg = sum(black_improve) / (game_len / 2)
    game_length.append(game_len)
    average_score.append(total / game_len)
    score_stdev.append(np.std(np.array(game_nums)))
    largest_gain.append(max_gain)
    largest_drop.append(max_drop)
    max_score.append(max_so_far)
    min_score.append(min_so_far)
    white_avg_improve.append(white_avg)
    black_avg_improve.append(black_avg)
    white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])
    black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])
    # Quartile statistics over each player's improvement sequence.
    # BUG FIX: the original computed the quartile mean as
    #     sum(slice) / len(seq) // 4
    # which, because / and // have equal precedence and associate left to
    # right, evaluates as (sum(slice) / len(seq)) // 4 -- not the intended
    # sum(slice) / (len(seq) // 4).  max(q, 1) guards the divisor for games
    # shorter than four recorded half-moves.
    for seq, q_avgs, q_maxes, q_mins, q_stds in (
            (white_improve,
             (white_q1_improve, white_q2_improve, white_q3_improve,
              white_q4_improve),
             (white_q1_max, white_q2_max, white_q3_max, white_q4_max),
             (white_q1_min, white_q2_min, white_q3_min, white_q4_min),
             (white_q1_stdev, white_q2_stdev, white_q3_stdev,
              white_q4_stdev)),
            (black_improve,
             (black_q1_improve, black_q2_improve, black_q3_improve,
              black_q4_improve),
             (black_q1_max, black_q2_max, black_q3_max, black_q4_max),
             (black_q1_min, black_q2_min, black_q3_min, black_q4_min),
             (black_q1_stdev, black_q2_stdev, black_q3_stdev,
              black_q4_stdev))):
        q = len(seq) // 4
        # Quartile boundaries; the last quartile absorbs the remainder.
        bounds = ((0, q), (q, 2 * q), (2 * q, 3 * q), (3 * q, len(seq)))
        for i, (lo, hi) in enumerate(bounds):
            q_avgs[i].append(sum(seq[lo:hi]) / max(q, 1))
            # max/min extend the slice by one element (original behaviour)
            # so it is never empty even when q == 0; the final quartile is
            # already guaranteed non-empty by the seed element.
            mm_hi = hi + 1 if i < 3 else hi
            q_maxes[i].append(max(seq[lo:mm_hi]))
            q_mins[i].append(min(seq[lo:mm_hi]))
            # NOTE: when q == 0 this slice is empty and np.std returns nan
            # (with a RuntimeWarning), exactly as the original code did.
            q_stds[i].append(np.std(np.array(seq[lo:hi])))
    # Rolling 5-ply improvement averages; games too short for a given window
    # fall back to the player's overall average.
    for seq, overall_avg, windows in (
            (white_improve, white_avg,
             (white_5_improve, white_10_improve, white_15_improve,
              white_20_improve, white_25_improve, white_30_improve,
              white_35_improve, white_40_improve, white_45_improve,
              white_50_improve, white_55_improve, white_60_improve,
              white_65_improve, white_70_improve, white_75_improve)),
            (black_improve, black_avg,
             (black_5_improve, black_10_improve, black_15_improve,
              black_20_improve, black_25_improve, black_30_improve,
              black_35_improve, black_40_improve, black_45_improve,
              black_50_improve, black_55_improve, black_60_improve,
              black_65_improve, black_70_improve, black_75_improve))):
        for i, dest in enumerate(windows):
            hi = 5 * (i + 1)
            if len(seq) >= hi:
                dest.append(sum(seq[hi - 5:hi]) / 5)
            else:
                dest.append(overall_avg)
    # Score snapshots at fixed half-move numbers (0 when the game is shorter).
    for i, dest in enumerate((game_score10, game_score20, game_score30,
                              game_score40, game_score50, game_score60,
                              game_score70, game_score80, game_score90,
                              game_score100)):
        idx = 10 * (i + 1)
        dest.append(game_nums[idx] if len(game_nums) > idx else 0)
    # Final evaluation; 0 when the game had no usable scores (prev is None)
    # or the last score itself was 0 (preserves original truthiness test).
    ending_score.append(prev if prev else 0)
# Assemble the feature table; dict insertion order fixes the CSV column order.
chess_dict = {
    'game_length': game_length,
    'average_score': average_score,
    'score_stdev': score_stdev,
    'largest_gain': largest_gain,
    'largest_drop': largest_drop,
    'max_score': max_score,
    'min_score': min_score,
    'ending_score': ending_score,
    'white_avg_improve': white_avg_improve,
    'black_avg_improve': black_avg_improve,
    'white_median_improve': white_median_improve,
    'black_median_improve': black_median_improve,
    'white_q1_improve': white_q1_improve,
    'white_q2_improve': white_q2_improve,
    'white_q3_improve': white_q3_improve,
    'white_q4_improve': white_q4_improve,
    'black_q1_improve': black_q1_improve,
    'black_q2_improve': black_q2_improve,
    'black_q3_improve': black_q3_improve,
    'black_q4_improve': black_q4_improve,
    'white_5_improve': white_5_improve,
    'white_10_improve': white_10_improve,
    'white_15_improve': white_15_improve,
    'white_20_improve': white_20_improve,
    'white_25_improve': white_25_improve,
    'white_30_improve': white_30_improve,
    'white_35_improve': white_35_improve,
    'white_40_improve': white_40_improve,
    'white_45_improve': white_45_improve,
    'white_50_improve': white_50_improve,
    'white_55_improve': white_55_improve,
    'white_60_improve': white_60_improve,
    'white_65_improve': white_65_improve,
    'white_70_improve': white_70_improve,
    'white_75_improve': white_75_improve,
    'black_5_improve': black_5_improve,
    'black_10_improve': black_10_improve,
    'black_15_improve': black_15_improve,
    'black_20_improve': black_20_improve,
    'black_25_improve': black_25_improve,
    'black_30_improve': black_30_improve,
    'black_35_improve': black_35_improve,
    'black_40_improve': black_40_improve,
    'black_45_improve': black_45_improve,
    'black_50_improve': black_50_improve,
    'black_55_improve': black_55_improve,
    'black_60_improve': black_60_improve,
    'black_65_improve': black_65_improve,
    'black_70_improve': black_70_improve,
    'black_75_improve': black_75_improve,
    'white_q1_max': white_q1_max,
    'white_q2_max': white_q2_max,
    'white_q3_max': white_q3_max,
    'white_q4_max': white_q4_max,
    'black_q1_max': black_q1_max,
    'black_q2_max': black_q2_max,
    'black_q3_max': black_q3_max,
    'black_q4_max': black_q4_max,
    'white_q1_min': white_q1_min,
    'white_q2_min': white_q2_min,
    'white_q3_min': white_q3_min,
    'white_q4_min': white_q4_min,
    'black_q1_min': black_q1_min,
    'black_q2_min': black_q2_min,
    'black_q3_min': black_q3_min,
    'black_q4_min': black_q4_min,
    'white_q1_stdev': white_q1_stdev,
    'white_q2_stdev': white_q2_stdev,
    'white_q3_stdev': white_q3_stdev,
    'white_q4_stdev': white_q4_stdev,
    'black_q1_stdev': black_q1_stdev,
    'black_q2_stdev': black_q2_stdev,
    'black_q3_stdev': black_q3_stdev,
    'black_q4_stdev': black_q4_stdev,
    'game_score10': game_score10,
    'game_score20': game_score20,
    'game_score30': game_score30,
    'game_score40': game_score40,
    'game_score50': game_score50,
    'game_score60': game_score60,
    'game_score70': game_score70,
    'game_score80': game_score80,
    'game_score90': game_score90,
    'game_score100': game_score100,
}
# BUG FIX: the index length was hard-coded to 50000 rows, which makes pandas
# raise ValueError for any other number of games.  Derive it from the data;
# output is identical for the original 50000-game dataset.
chess_df = pd.DataFrame(chess_dict, index=range(1, len(game_length) + 1))
chess_df.index.name = 'Event'
chess_df.to_csv('score_features.csv')
<|reserved_special_token_1|>
#Script to extract features from chess score data file stockfish.csv
import numpy as np
import pandas as pd
# Load in and format raw chess game scoring data: column 1 of each
# stockfish.csv row holds a space-separated score sequence per game;
# [1:] drops the header row.
# BUG FIX: use a context manager so the file handle is closed deterministically
# (the original bare open() leaked the descriptor).
with open("stockfish.csv") as score_file:
    raw_scores = [line.strip().split(",")[1].split()
                  for line in score_file][1:]
# Initialize containers for features to extract -- exactly one entry is
# appended to each list per game.
(game_length, average_score, score_stdev, largest_gain, largest_drop,
 max_score, min_score, ending_score) = ([] for _ in range(8))
# Average / median per-player improvement.
(white_avg_improve, black_avg_improve, white_median_improve,
 black_median_improve) = ([] for _ in range(4))
# Quartile mean improvements per player.
(white_q1_improve, white_q2_improve, white_q3_improve, white_q4_improve,
 black_q1_improve, black_q2_improve, black_q3_improve,
 black_q4_improve) = ([] for _ in range(8))
# Score snapshots at half-moves 10, 20, ..., 100.
(game_score10, game_score20, game_score30, game_score40, game_score50,
 game_score60, game_score70, game_score80, game_score90,
 game_score100) = ([] for _ in range(10))
# Quartile max / min / stdev of per-player improvements.
(white_q1_max, white_q2_max, white_q3_max, white_q4_max,
 black_q1_max, black_q2_max, black_q3_max, black_q4_max) = (
    [] for _ in range(8))
(white_q1_min, white_q2_min, white_q3_min, white_q4_min,
 black_q1_min, black_q2_min, black_q3_min, black_q4_min) = (
    [] for _ in range(8))
(white_q1_stdev, white_q2_stdev, white_q3_stdev, white_q4_stdev,
 black_q1_stdev, black_q2_stdev, black_q3_stdev, black_q4_stdev) = (
    [] for _ in range(8))
# 5-ply window average improvements (plies 1-5, 6-10, ..., 71-75).
(white_5_improve, white_10_improve, white_15_improve, white_20_improve,
 white_25_improve, white_30_improve, white_35_improve, white_40_improve,
 white_45_improve, white_50_improve, white_55_improve, white_60_improve,
 white_65_improve, white_70_improve, white_75_improve) = (
    [] for _ in range(15))
(black_5_improve, black_10_improve, black_15_improve, black_20_improve,
 black_25_improve, black_30_improve, black_35_improve, black_40_improve,
 black_45_improve, black_50_improve, black_55_improve, black_60_improve,
 black_65_improve, black_70_improve, black_75_improve) = (
    [] for _ in range(15))
#Loop through game data, calculate and append new features to feature containers

def _window_means(changes, dests, fallback):
    """Append the mean of each successive 5-change window of `changes` to the
    matching destination list; when the game is too short for a window the
    game-wide average `fallback` is appended instead (original behavior)."""
    for k, dest in enumerate(dests, start=1):
        hi = 5 * k
        if len(changes) >= hi:
            dest.append(sum(changes[hi - 5:hi]) / 5)
        else:
            dest.append(fallback)

def _quartile_stats(changes):
    """Return (means, maxs, mins, stdevs), each a 4-item list over the four
    quarters of `changes`.

    Slicing mirrors the original feature definitions: means and stdevs use
    non-overlapping quarters (empty for lists shorter than 4 elements, which
    leaves np.std producing NaN exactly as before), while max/min extend each
    of the first three quarters by one element so their slices are never empty
    (`changes` is always seeded with at least one element).
    """
    q = len(changes) // 4
    # Guard: for very short games q == 0; dividing by max(1, q) keeps the
    # empty-quarter means at 0.0 instead of raising ZeroDivisionError.
    denom = max(1, q)
    cuts = [(0, q), (q, 2 * q), (2 * q, 3 * q), (3 * q, len(changes))]
    # BUGFIX: the original wrote sum(seg)/len(changes)//4, which by operator
    # precedence is floor((sum/len)/4) — not a per-quarter mean at all. The
    # intended quantity is sum(seg)/(len(changes)//4).
    means = [sum(changes[a:b]) / denom for a, b in cuts]
    maxs = [max(changes[a:b + 1]) for a, b in cuts[:3]] + [max(changes[3 * q:])]
    mins = [min(changes[a:b + 1]) for a, b in cuts[:3]] + [min(changes[3 * q:])]
    stdevs = [np.std(np.array(changes[a:b])) for a, b in cuts]
    return means, maxs, mins, stdevs

for game in raw_scores:
    game_len = len(game)+1 # Add 1 to game length to avoid divide by zero errors caused by empty games
    total = 0
    prev = None            # previous numeric score; None until the first usable score
    player = 1             # alternates 1 -> 0 -> 1 ...; 1 credits the change to black
    max_so_far = -100
    min_so_far = 100
    max_drop = 0
    max_gain = 0
    white_improve = [0]    # seeded with 0 so the per-side change lists are never empty
    black_improve = [0]
    game_nums = [0]        # seeded with 0; index i holds the i-th recorded score
    for score in game:
        if score == "NA":
            continue       # skip moves without an engine evaluation
        score = int(score)
        game_nums.append(score)
        total += score
        if prev is not None:
            change = score - prev
            max_drop = min(max_drop, change)
            max_gain = max(max_gain, change)
            # Attribute each score swing to the sides alternately.
            if player == 1:
                black_improve.append(change)
            else:
                white_improve.append(change)
            player = 1 - player
        prev = score
        max_so_far = max(max_so_far, score)
        min_so_far = min(min_so_far, score)
    #Add computed values to feature containers
    # NOTE(review): the averages are normalised by half the padded game length,
    # not by the number of recorded changes — preserved as-is; confirm intent.
    white_avg = sum(white_improve)/(game_len/2)
    black_avg = sum(black_improve)/(game_len/2)
    game_length.append(game_len)
    average_score.append(total/game_len)
    score_stdev.append(np.std(np.array(game_nums)))
    largest_gain.append(max_gain)
    largest_drop.append(max_drop)
    max_score.append(max_so_far)
    min_score.append(min_so_far)
    white_avg_improve.append(white_avg)
    black_avg_improve.append(black_avg)
    # Upper median: element at index len//2 of the sorted change list.
    white_median_improve.append(sorted(white_improve)[len(white_improve)//2])
    black_median_improve.append(sorted(black_improve)[len(black_improve)//2])
    # Quartile statistics of the per-side score changes.
    w_mean, w_max, w_min, w_std = _quartile_stats(white_improve)
    b_mean, b_max, b_min, b_std = _quartile_stats(black_improve)
    for dests, vals in (
        ((white_q1_improve, white_q2_improve, white_q3_improve, white_q4_improve), w_mean),
        ((black_q1_improve, black_q2_improve, black_q3_improve, black_q4_improve), b_mean),
        ((white_q1_max, white_q2_max, white_q3_max, white_q4_max), w_max),
        ((black_q1_max, black_q2_max, black_q3_max, black_q4_max), b_max),
        ((white_q1_min, white_q2_min, white_q3_min, white_q4_min), w_min),
        ((black_q1_min, black_q2_min, black_q3_min, black_q4_min), b_min),
        ((white_q1_stdev, white_q2_stdev, white_q3_stdev, white_q4_stdev), w_std),
        ((black_q1_stdev, black_q2_stdev, black_q3_stdev, black_q4_stdev), b_std),
    ):
        for dest, val in zip(dests, vals):
            dest.append(val)
    # Mean improvement over each successive 5-change window per side.
    _window_means(white_improve,
                  (white_5_improve, white_10_improve, white_15_improve,
                   white_20_improve, white_25_improve, white_30_improve,
                   white_35_improve, white_40_improve, white_45_improve,
                   white_50_improve, white_55_improve, white_60_improve,
                   white_65_improve, white_70_improve, white_75_improve),
                  white_avg)
    _window_means(black_improve,
                  (black_5_improve, black_10_improve, black_15_improve,
                   black_20_improve, black_25_improve, black_30_improve,
                   black_35_improve, black_40_improve, black_45_improve,
                   black_50_improve, black_55_improve, black_60_improve,
                   black_65_improve, black_70_improve, black_75_improve),
                  black_avg)
    # Score sampled at fixed move numbers 10, 20, ..., 100 (0 if game is shorter).
    for k, dest in enumerate((game_score10, game_score20, game_score30,
                              game_score40, game_score50, game_score60,
                              game_score70, game_score80, game_score90,
                              game_score100), start=1):
        idx = 10 * k
        dest.append(game_nums[idx] if len(game_nums) > idx else 0)
    # Final recorded score; 0 when the game had no usable scores (a final score
    # of exactly 0 also maps to 0, matching the original truthiness test).
    ending_score.append(prev if prev else 0)
# Assemble the feature table: one column per accumulator list, one row per
# game. Keyword-argument dict construction preserves the column order, so the
# CSV layout is unchanged.
chess_dict = dict(
    game_length=game_length, average_score=average_score,
    score_stdev=score_stdev, largest_gain=largest_gain,
    largest_drop=largest_drop, max_score=max_score, min_score=min_score,
    ending_score=ending_score,
    white_avg_improve=white_avg_improve, black_avg_improve=black_avg_improve,
    white_median_improve=white_median_improve,
    black_median_improve=black_median_improve,
    white_q1_improve=white_q1_improve, white_q2_improve=white_q2_improve,
    white_q3_improve=white_q3_improve, white_q4_improve=white_q4_improve,
    black_q1_improve=black_q1_improve, black_q2_improve=black_q2_improve,
    black_q3_improve=black_q3_improve, black_q4_improve=black_q4_improve,
    white_5_improve=white_5_improve, white_10_improve=white_10_improve,
    white_15_improve=white_15_improve, white_20_improve=white_20_improve,
    white_25_improve=white_25_improve, white_30_improve=white_30_improve,
    white_35_improve=white_35_improve, white_40_improve=white_40_improve,
    white_45_improve=white_45_improve, white_50_improve=white_50_improve,
    white_55_improve=white_55_improve, white_60_improve=white_60_improve,
    white_65_improve=white_65_improve, white_70_improve=white_70_improve,
    white_75_improve=white_75_improve,
    black_5_improve=black_5_improve, black_10_improve=black_10_improve,
    black_15_improve=black_15_improve, black_20_improve=black_20_improve,
    black_25_improve=black_25_improve, black_30_improve=black_30_improve,
    black_35_improve=black_35_improve, black_40_improve=black_40_improve,
    black_45_improve=black_45_improve, black_50_improve=black_50_improve,
    black_55_improve=black_55_improve, black_60_improve=black_60_improve,
    black_65_improve=black_65_improve, black_70_improve=black_70_improve,
    black_75_improve=black_75_improve,
    white_q1_max=white_q1_max, white_q2_max=white_q2_max,
    white_q3_max=white_q3_max, white_q4_max=white_q4_max,
    black_q1_max=black_q1_max, black_q2_max=black_q2_max,
    black_q3_max=black_q3_max, black_q4_max=black_q4_max,
    white_q1_min=white_q1_min, white_q2_min=white_q2_min,
    white_q3_min=white_q3_min, white_q4_min=white_q4_min,
    black_q1_min=black_q1_min, black_q2_min=black_q2_min,
    black_q3_min=black_q3_min, black_q4_min=black_q4_min,
    white_q1_stdev=white_q1_stdev, white_q2_stdev=white_q2_stdev,
    white_q3_stdev=white_q3_stdev, white_q4_stdev=white_q4_stdev,
    black_q1_stdev=black_q1_stdev, black_q2_stdev=black_q2_stdev,
    black_q3_stdev=black_q3_stdev, black_q4_stdev=black_q4_stdev,
    game_score10=game_score10, game_score20=game_score20,
    game_score30=game_score30, game_score40=game_score40,
    game_score50=game_score50, game_score60=game_score60,
    game_score70=game_score70, game_score80=game_score80,
    game_score90=game_score90, game_score100=game_score100,
)
#Create feature data frame
# Rows are labelled 1..50000; NOTE(review): this assumes exactly 50000 games
# were processed above — confirm against the raw_scores input.
chess_df = pd.DataFrame(chess_dict, index=list(range(1, 50001)))
chess_df.index.name = "Event"
#Write the new feature data frame to CSV
chess_df.to_csv("score_features.csv")
|
flexible
|
{
"blob_id": "ad9bb34fdb05ab885f4871693729449f3618603a",
"index": 8321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor game in raw_scores:\n game_len = len(game) + 1\n total = 0\n prev = None\n player = 1\n max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != 'NA':\n score = int(score)\n game_nums.append(score)\n total += score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player - 1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n white_avg = sum(white_improve) / (game_len / 2)\n black_avg = sum(black_improve) / (game_len / 2)\n game_length.append(game_len)\n average_score.append(total / game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])\n black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])\n white_q1_improve.append(sum(white_improve[0:len(white_improve) // 4]) /\n len(white_improve) // 4)\n white_q2_improve.append(sum(white_improve[len(white_improve) // 4:len(\n white_improve) // 4 * 2]) / len(white_improve) // 4)\n white_q3_improve.append(sum(white_improve[len(white_improve) // 4 * 2:\n len(white_improve) // 4 * 3]) / len(white_improve) // 4)\n white_q4_improve.append(sum(white_improve[len(white_improve) // 4 * 3:]\n ) / len(white_improve) // 4)\n black_q1_improve.append(sum(black_improve[0:len(black_improve) // 4]) /\n len(black_improve) // 4)\n black_q2_improve.append(sum(black_improve[len(black_improve) // 4:len(\n black_improve) // 4 * 2]) / len(black_improve) // 4)\n 
black_q3_improve.append(sum(black_improve[len(black_improve) // 4 * 2:\n len(black_improve) // 4 * 3]) / len(black_improve) // 4)\n black_q4_improve.append(sum(black_improve[len(black_improve) // 4 * 3:]\n ) / len(black_improve) // 4)\n white_q1_max.append(max(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_max.append(max(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_max.append(max(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_max.append(max(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_max.append(max(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_max.append(max(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_max.append(max(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_max.append(max(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_min.append(min(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_min.append(min(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_min.append(min(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_min.append(min(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_min.append(min(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_min.append(min(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_min.append(min(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_min.append(min(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_stdev.append(np.std(np.array(white_improve[0:len(white_improve\n ) // 4])))\n white_q2_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4:len(white_improve) // 4 * 2])))\n white_q3_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 2:len(white_improve) // 4 * 3])))\n 
white_q4_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 3:])))\n black_q1_stdev.append(np.std(np.array(black_improve[0:len(black_improve\n ) // 4])))\n black_q2_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4:len(black_improve) // 4 * 2])))\n black_q3_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 2:len(black_improve) // 4 * 3])))\n black_q4_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 3:])))\n if len(white_improve) >= 5:\n white_5_improve.append(sum(white_improve[0:5]) / 5)\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >= 10:\n white_10_improve.append(sum(white_improve[5:10]) / 5)\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >= 15:\n white_15_improve.append(sum(white_improve[10:15]) / 5)\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >= 20:\n white_20_improve.append(sum(white_improve[15:20]) / 5)\n else:\n white_20_improve.append(white_avg)\n if len(white_improve) >= 25:\n white_25_improve.append(sum(white_improve[20:25]) / 5)\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >= 30:\n white_30_improve.append(sum(white_improve[25:30]) / 5)\n else:\n white_30_improve.append(white_avg)\n if len(white_improve) >= 35:\n white_35_improve.append(sum(white_improve[30:35]) / 5)\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >= 40:\n white_40_improve.append(sum(white_improve[35:40]) / 5)\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >= 45:\n white_45_improve.append(sum(white_improve[40:45]) / 5)\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >= 50:\n white_50_improve.append(sum(white_improve[45:50]) / 5)\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >= 55:\n white_55_improve.append(sum(white_improve[50:55]) / 5)\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >= 60:\n 
white_60_improve.append(sum(white_improve[55:60]) / 5)\n else:\n white_60_improve.append(white_avg)\n if len(white_improve) >= 65:\n white_65_improve.append(sum(white_improve[60:65]) / 5)\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >= 70:\n white_70_improve.append(sum(white_improve[65:70]) / 5)\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >= 75:\n white_75_improve.append(sum(white_improve[70:75]) / 5)\n else:\n white_75_improve.append(white_avg)\n if len(black_improve) >= 5:\n black_5_improve.append(sum(black_improve[0:5]) / 5)\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >= 10:\n black_10_improve.append(sum(black_improve[5:10]) / 5)\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >= 15:\n black_15_improve.append(sum(black_improve[10:15]) / 5)\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >= 20:\n black_20_improve.append(sum(black_improve[15:20]) / 5)\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >= 25:\n black_25_improve.append(sum(black_improve[20:25]) / 5)\n else:\n black_25_improve.append(black_avg)\n if len(black_improve) >= 30:\n black_30_improve.append(sum(black_improve[25:30]) / 5)\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >= 35:\n black_35_improve.append(sum(black_improve[30:35]) / 5)\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >= 40:\n black_40_improve.append(sum(black_improve[35:40]) / 5)\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >= 45:\n black_45_improve.append(sum(black_improve[40:45]) / 5)\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >= 50:\n black_50_improve.append(sum(black_improve[45:50]) / 5)\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >= 55:\n black_55_improve.append(sum(black_improve[50:55]) / 5)\n else:\n black_55_improve.append(black_avg)\n if len(black_improve) >= 60:\n 
black_60_improve.append(sum(black_improve[55:60]) / 5)\n else:\n black_60_improve.append(black_avg)\n if len(black_improve) >= 65:\n black_65_improve.append(sum(black_improve[60:65]) / 5)\n else:\n black_65_improve.append(black_avg)\n if len(black_improve) >= 70:\n black_70_improve.append(sum(black_improve[65:70]) / 5)\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >= 75:\n black_75_improve.append(sum(black_improve[70:75]) / 5)\n else:\n black_75_improve.append(black_avg)\n if len(game_nums) > 10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums) > 20:\n game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums) > 30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums) > 40:\n game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums) > 50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums) > 60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums) > 70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums) > 80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums) > 90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums) > 100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\n<mask token>\nchess_df.to_csv('score_features.csv')\n",
"step-3": "<mask token>\nraw_scores = [line.strip().split(',')[1].split() for line in open(\n 'stockfish.csv')][1:]\ngame_length = []\naverage_score = []\nscore_stdev = []\nlargest_gain = []\nlargest_drop = []\nmax_score = []\nmin_score = []\nending_score = []\nwhite_avg_improve = []\nblack_avg_improve = []\nwhite_median_improve = []\nblack_median_improve = []\nwhite_q1_improve = []\nwhite_q2_improve = []\nwhite_q3_improve = []\nwhite_q4_improve = []\nblack_q1_improve = []\nblack_q2_improve = []\nblack_q3_improve = []\nblack_q4_improve = []\ngame_score10 = []\ngame_score20 = []\ngame_score30 = []\ngame_score40 = []\ngame_score50 = []\ngame_score60 = []\ngame_score70 = []\ngame_score80 = []\ngame_score90 = []\ngame_score100 = []\nwhite_q1_max = []\nwhite_q2_max = []\nwhite_q3_max = []\nwhite_q4_max = []\nblack_q1_max = []\nblack_q2_max = []\nblack_q3_max = []\nblack_q4_max = []\nwhite_q1_min = []\nwhite_q2_min = []\nwhite_q3_min = []\nwhite_q4_min = []\nblack_q1_min = []\nblack_q2_min = []\nblack_q3_min = []\nblack_q4_min = []\nwhite_q1_stdev = []\nwhite_q2_stdev = []\nwhite_q3_stdev = []\nwhite_q4_stdev = []\nblack_q1_stdev = []\nblack_q2_stdev = []\nblack_q3_stdev = []\nblack_q4_stdev = []\nwhite_5_improve = []\nwhite_10_improve = []\nwhite_15_improve = []\nwhite_20_improve = []\nwhite_25_improve = []\nwhite_30_improve = []\nwhite_35_improve = []\nwhite_40_improve = []\nwhite_45_improve = []\nwhite_50_improve = []\nwhite_55_improve = []\nwhite_60_improve = []\nwhite_65_improve = []\nwhite_70_improve = []\nwhite_75_improve = []\nblack_5_improve = []\nblack_10_improve = []\nblack_15_improve = []\nblack_20_improve = []\nblack_25_improve = []\nblack_30_improve = []\nblack_35_improve = []\nblack_40_improve = []\nblack_45_improve = []\nblack_50_improve = []\nblack_55_improve = []\nblack_60_improve = []\nblack_65_improve = []\nblack_70_improve = []\nblack_75_improve = []\nfor game in raw_scores:\n game_len = len(game) + 1\n total = 0\n prev = None\n player = 1\n 
max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != 'NA':\n score = int(score)\n game_nums.append(score)\n total += score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player - 1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n white_avg = sum(white_improve) / (game_len / 2)\n black_avg = sum(black_improve) / (game_len / 2)\n game_length.append(game_len)\n average_score.append(total / game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])\n black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])\n white_q1_improve.append(sum(white_improve[0:len(white_improve) // 4]) /\n len(white_improve) // 4)\n white_q2_improve.append(sum(white_improve[len(white_improve) // 4:len(\n white_improve) // 4 * 2]) / len(white_improve) // 4)\n white_q3_improve.append(sum(white_improve[len(white_improve) // 4 * 2:\n len(white_improve) // 4 * 3]) / len(white_improve) // 4)\n white_q4_improve.append(sum(white_improve[len(white_improve) // 4 * 3:]\n ) / len(white_improve) // 4)\n black_q1_improve.append(sum(black_improve[0:len(black_improve) // 4]) /\n len(black_improve) // 4)\n black_q2_improve.append(sum(black_improve[len(black_improve) // 4:len(\n black_improve) // 4 * 2]) / len(black_improve) // 4)\n black_q3_improve.append(sum(black_improve[len(black_improve) // 4 * 2:\n len(black_improve) // 4 * 3]) / len(black_improve) // 4)\n 
black_q4_improve.append(sum(black_improve[len(black_improve) // 4 * 3:]\n ) / len(black_improve) // 4)\n white_q1_max.append(max(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_max.append(max(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_max.append(max(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_max.append(max(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_max.append(max(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_max.append(max(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_max.append(max(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_max.append(max(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_min.append(min(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_min.append(min(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_min.append(min(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_min.append(min(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_min.append(min(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_min.append(min(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_min.append(min(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_min.append(min(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_stdev.append(np.std(np.array(white_improve[0:len(white_improve\n ) // 4])))\n white_q2_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4:len(white_improve) // 4 * 2])))\n white_q3_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 2:len(white_improve) // 4 * 3])))\n white_q4_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 3:])))\n black_q1_stdev.append(np.std(np.array(black_improve[0:len(black_improve\n ) // 
4])))\n black_q2_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4:len(black_improve) // 4 * 2])))\n black_q3_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 2:len(black_improve) // 4 * 3])))\n black_q4_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 3:])))\n if len(white_improve) >= 5:\n white_5_improve.append(sum(white_improve[0:5]) / 5)\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >= 10:\n white_10_improve.append(sum(white_improve[5:10]) / 5)\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >= 15:\n white_15_improve.append(sum(white_improve[10:15]) / 5)\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >= 20:\n white_20_improve.append(sum(white_improve[15:20]) / 5)\n else:\n white_20_improve.append(white_avg)\n if len(white_improve) >= 25:\n white_25_improve.append(sum(white_improve[20:25]) / 5)\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >= 30:\n white_30_improve.append(sum(white_improve[25:30]) / 5)\n else:\n white_30_improve.append(white_avg)\n if len(white_improve) >= 35:\n white_35_improve.append(sum(white_improve[30:35]) / 5)\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >= 40:\n white_40_improve.append(sum(white_improve[35:40]) / 5)\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >= 45:\n white_45_improve.append(sum(white_improve[40:45]) / 5)\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >= 50:\n white_50_improve.append(sum(white_improve[45:50]) / 5)\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >= 55:\n white_55_improve.append(sum(white_improve[50:55]) / 5)\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >= 60:\n white_60_improve.append(sum(white_improve[55:60]) / 5)\n else:\n white_60_improve.append(white_avg)\n if len(white_improve) >= 65:\n white_65_improve.append(sum(white_improve[60:65]) / 
5)\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >= 70:\n white_70_improve.append(sum(white_improve[65:70]) / 5)\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >= 75:\n white_75_improve.append(sum(white_improve[70:75]) / 5)\n else:\n white_75_improve.append(white_avg)\n if len(black_improve) >= 5:\n black_5_improve.append(sum(black_improve[0:5]) / 5)\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >= 10:\n black_10_improve.append(sum(black_improve[5:10]) / 5)\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >= 15:\n black_15_improve.append(sum(black_improve[10:15]) / 5)\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >= 20:\n black_20_improve.append(sum(black_improve[15:20]) / 5)\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >= 25:\n black_25_improve.append(sum(black_improve[20:25]) / 5)\n else:\n black_25_improve.append(black_avg)\n if len(black_improve) >= 30:\n black_30_improve.append(sum(black_improve[25:30]) / 5)\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >= 35:\n black_35_improve.append(sum(black_improve[30:35]) / 5)\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >= 40:\n black_40_improve.append(sum(black_improve[35:40]) / 5)\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >= 45:\n black_45_improve.append(sum(black_improve[40:45]) / 5)\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >= 50:\n black_50_improve.append(sum(black_improve[45:50]) / 5)\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >= 55:\n black_55_improve.append(sum(black_improve[50:55]) / 5)\n else:\n black_55_improve.append(black_avg)\n if len(black_improve) >= 60:\n black_60_improve.append(sum(black_improve[55:60]) / 5)\n else:\n black_60_improve.append(black_avg)\n if len(black_improve) >= 65:\n black_65_improve.append(sum(black_improve[60:65]) / 5)\n 
else:\n black_65_improve.append(black_avg)\n if len(black_improve) >= 70:\n black_70_improve.append(sum(black_improve[65:70]) / 5)\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >= 75:\n black_75_improve.append(sum(black_improve[70:75]) / 5)\n else:\n black_75_improve.append(black_avg)\n if len(game_nums) > 10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums) > 20:\n game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums) > 30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums) > 40:\n game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums) > 50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums) > 60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums) > 70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums) > 80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums) > 90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums) > 100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\nchess_dict = {'game_length': game_length, 'average_score': average_score,\n 'score_stdev': score_stdev, 'largest_gain': largest_gain,\n 'largest_drop': largest_drop, 'max_score': max_score, 'min_score':\n min_score, 'ending_score': ending_score, 'white_avg_improve':\n white_avg_improve, 'black_avg_improve': black_avg_improve,\n 'white_median_improve': white_median_improve, 'black_median_improve':\n black_median_improve, 'white_q1_improve': white_q1_improve,\n 'white_q2_improve': white_q2_improve, 'white_q3_improve':\n white_q3_improve, 'white_q4_improve': white_q4_improve,\n 'black_q1_improve': black_q1_improve, 'black_q2_improve':\n 
black_q2_improve, 'black_q3_improve': black_q3_improve,\n 'black_q4_improve': black_q4_improve, 'white_5_improve':\n white_5_improve, 'white_10_improve': white_10_improve,\n 'white_15_improve': white_15_improve, 'white_20_improve':\n white_20_improve, 'white_25_improve': white_25_improve,\n 'white_30_improve': white_30_improve, 'white_35_improve':\n white_35_improve, 'white_40_improve': white_40_improve,\n 'white_45_improve': white_45_improve, 'white_50_improve':\n white_50_improve, 'white_55_improve': white_55_improve,\n 'white_60_improve': white_60_improve, 'white_65_improve':\n white_65_improve, 'white_70_improve': white_70_improve,\n 'white_75_improve': white_75_improve, 'black_5_improve':\n black_5_improve, 'black_10_improve': black_10_improve,\n 'black_15_improve': black_15_improve, 'black_20_improve':\n black_20_improve, 'black_25_improve': black_25_improve,\n 'black_30_improve': black_30_improve, 'black_35_improve':\n black_35_improve, 'black_40_improve': black_40_improve,\n 'black_45_improve': black_45_improve, 'black_50_improve':\n black_50_improve, 'black_55_improve': black_55_improve,\n 'black_60_improve': black_60_improve, 'black_65_improve':\n black_65_improve, 'black_70_improve': black_70_improve,\n 'black_75_improve': black_75_improve, 'white_q1_max': white_q1_max,\n 'white_q2_max': white_q2_max, 'white_q3_max': white_q3_max,\n 'white_q4_max': white_q4_max, 'black_q1_max': black_q1_max,\n 'black_q2_max': black_q2_max, 'black_q3_max': black_q3_max,\n 'black_q4_max': black_q4_max, 'white_q1_min': white_q1_min,\n 'white_q2_min': white_q2_min, 'white_q3_min': white_q3_min,\n 'white_q4_min': white_q4_min, 'black_q1_min': black_q1_min,\n 'black_q2_min': black_q2_min, 'black_q3_min': black_q3_min,\n 'black_q4_min': black_q4_min, 'white_q1_stdev': white_q1_stdev,\n 'white_q2_stdev': white_q2_stdev, 'white_q3_stdev': white_q3_stdev,\n 'white_q4_stdev': white_q4_stdev, 'black_q1_stdev': black_q1_stdev,\n 'black_q2_stdev': black_q2_stdev, 'black_q3_stdev': 
black_q3_stdev,\n 'black_q4_stdev': black_q4_stdev, 'game_score10': game_score10,\n 'game_score20': game_score20, 'game_score30': game_score30,\n 'game_score40': game_score40, 'game_score50': game_score50,\n 'game_score60': game_score60, 'game_score70': game_score70,\n 'game_score80': game_score80, 'game_score90': game_score90,\n 'game_score100': game_score100}\nchess_df = pd.DataFrame(chess_dict, index=[x for x in range(1, 50001)])\nchess_df.index.name = 'Event'\nchess_df.to_csv('score_features.csv')\n",
"step-4": "import numpy as np\nimport pandas as pd\nraw_scores = [line.strip().split(',')[1].split() for line in open(\n 'stockfish.csv')][1:]\ngame_length = []\naverage_score = []\nscore_stdev = []\nlargest_gain = []\nlargest_drop = []\nmax_score = []\nmin_score = []\nending_score = []\nwhite_avg_improve = []\nblack_avg_improve = []\nwhite_median_improve = []\nblack_median_improve = []\nwhite_q1_improve = []\nwhite_q2_improve = []\nwhite_q3_improve = []\nwhite_q4_improve = []\nblack_q1_improve = []\nblack_q2_improve = []\nblack_q3_improve = []\nblack_q4_improve = []\ngame_score10 = []\ngame_score20 = []\ngame_score30 = []\ngame_score40 = []\ngame_score50 = []\ngame_score60 = []\ngame_score70 = []\ngame_score80 = []\ngame_score90 = []\ngame_score100 = []\nwhite_q1_max = []\nwhite_q2_max = []\nwhite_q3_max = []\nwhite_q4_max = []\nblack_q1_max = []\nblack_q2_max = []\nblack_q3_max = []\nblack_q4_max = []\nwhite_q1_min = []\nwhite_q2_min = []\nwhite_q3_min = []\nwhite_q4_min = []\nblack_q1_min = []\nblack_q2_min = []\nblack_q3_min = []\nblack_q4_min = []\nwhite_q1_stdev = []\nwhite_q2_stdev = []\nwhite_q3_stdev = []\nwhite_q4_stdev = []\nblack_q1_stdev = []\nblack_q2_stdev = []\nblack_q3_stdev = []\nblack_q4_stdev = []\nwhite_5_improve = []\nwhite_10_improve = []\nwhite_15_improve = []\nwhite_20_improve = []\nwhite_25_improve = []\nwhite_30_improve = []\nwhite_35_improve = []\nwhite_40_improve = []\nwhite_45_improve = []\nwhite_50_improve = []\nwhite_55_improve = []\nwhite_60_improve = []\nwhite_65_improve = []\nwhite_70_improve = []\nwhite_75_improve = []\nblack_5_improve = []\nblack_10_improve = []\nblack_15_improve = []\nblack_20_improve = []\nblack_25_improve = []\nblack_30_improve = []\nblack_35_improve = []\nblack_40_improve = []\nblack_45_improve = []\nblack_50_improve = []\nblack_55_improve = []\nblack_60_improve = []\nblack_65_improve = []\nblack_70_improve = []\nblack_75_improve = []\nfor game in raw_scores:\n game_len = len(game) + 1\n total = 0\n prev = 
None\n player = 1\n max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != 'NA':\n score = int(score)\n game_nums.append(score)\n total += score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player - 1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n white_avg = sum(white_improve) / (game_len / 2)\n black_avg = sum(black_improve) / (game_len / 2)\n game_length.append(game_len)\n average_score.append(total / game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])\n black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])\n white_q1_improve.append(sum(white_improve[0:len(white_improve) // 4]) /\n len(white_improve) // 4)\n white_q2_improve.append(sum(white_improve[len(white_improve) // 4:len(\n white_improve) // 4 * 2]) / len(white_improve) // 4)\n white_q3_improve.append(sum(white_improve[len(white_improve) // 4 * 2:\n len(white_improve) // 4 * 3]) / len(white_improve) // 4)\n white_q4_improve.append(sum(white_improve[len(white_improve) // 4 * 3:]\n ) / len(white_improve) // 4)\n black_q1_improve.append(sum(black_improve[0:len(black_improve) // 4]) /\n len(black_improve) // 4)\n black_q2_improve.append(sum(black_improve[len(black_improve) // 4:len(\n black_improve) // 4 * 2]) / len(black_improve) // 4)\n black_q3_improve.append(sum(black_improve[len(black_improve) // 4 * 2:\n len(black_improve) // 4 * 3]) / 
len(black_improve) // 4)\n black_q4_improve.append(sum(black_improve[len(black_improve) // 4 * 3:]\n ) / len(black_improve) // 4)\n white_q1_max.append(max(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_max.append(max(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_max.append(max(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_max.append(max(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_max.append(max(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_max.append(max(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_max.append(max(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_max.append(max(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_min.append(min(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_min.append(min(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_min.append(min(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_min.append(min(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_min.append(min(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_min.append(min(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_min.append(min(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_min.append(min(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_stdev.append(np.std(np.array(white_improve[0:len(white_improve\n ) // 4])))\n white_q2_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4:len(white_improve) // 4 * 2])))\n white_q3_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 2:len(white_improve) // 4 * 3])))\n white_q4_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 3:])))\n 
black_q1_stdev.append(np.std(np.array(black_improve[0:len(black_improve\n ) // 4])))\n black_q2_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4:len(black_improve) // 4 * 2])))\n black_q3_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 2:len(black_improve) // 4 * 3])))\n black_q4_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 3:])))\n if len(white_improve) >= 5:\n white_5_improve.append(sum(white_improve[0:5]) / 5)\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >= 10:\n white_10_improve.append(sum(white_improve[5:10]) / 5)\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >= 15:\n white_15_improve.append(sum(white_improve[10:15]) / 5)\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >= 20:\n white_20_improve.append(sum(white_improve[15:20]) / 5)\n else:\n white_20_improve.append(white_avg)\n if len(white_improve) >= 25:\n white_25_improve.append(sum(white_improve[20:25]) / 5)\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >= 30:\n white_30_improve.append(sum(white_improve[25:30]) / 5)\n else:\n white_30_improve.append(white_avg)\n if len(white_improve) >= 35:\n white_35_improve.append(sum(white_improve[30:35]) / 5)\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >= 40:\n white_40_improve.append(sum(white_improve[35:40]) / 5)\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >= 45:\n white_45_improve.append(sum(white_improve[40:45]) / 5)\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >= 50:\n white_50_improve.append(sum(white_improve[45:50]) / 5)\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >= 55:\n white_55_improve.append(sum(white_improve[50:55]) / 5)\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >= 60:\n white_60_improve.append(sum(white_improve[55:60]) / 5)\n else:\n white_60_improve.append(white_avg)\n if 
len(white_improve) >= 65:\n white_65_improve.append(sum(white_improve[60:65]) / 5)\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >= 70:\n white_70_improve.append(sum(white_improve[65:70]) / 5)\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >= 75:\n white_75_improve.append(sum(white_improve[70:75]) / 5)\n else:\n white_75_improve.append(white_avg)\n if len(black_improve) >= 5:\n black_5_improve.append(sum(black_improve[0:5]) / 5)\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >= 10:\n black_10_improve.append(sum(black_improve[5:10]) / 5)\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >= 15:\n black_15_improve.append(sum(black_improve[10:15]) / 5)\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >= 20:\n black_20_improve.append(sum(black_improve[15:20]) / 5)\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >= 25:\n black_25_improve.append(sum(black_improve[20:25]) / 5)\n else:\n black_25_improve.append(black_avg)\n if len(black_improve) >= 30:\n black_30_improve.append(sum(black_improve[25:30]) / 5)\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >= 35:\n black_35_improve.append(sum(black_improve[30:35]) / 5)\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >= 40:\n black_40_improve.append(sum(black_improve[35:40]) / 5)\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >= 45:\n black_45_improve.append(sum(black_improve[40:45]) / 5)\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >= 50:\n black_50_improve.append(sum(black_improve[45:50]) / 5)\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >= 55:\n black_55_improve.append(sum(black_improve[50:55]) / 5)\n else:\n black_55_improve.append(black_avg)\n if len(black_improve) >= 60:\n black_60_improve.append(sum(black_improve[55:60]) / 5)\n else:\n black_60_improve.append(black_avg)\n if 
len(black_improve) >= 65:\n black_65_improve.append(sum(black_improve[60:65]) / 5)\n else:\n black_65_improve.append(black_avg)\n if len(black_improve) >= 70:\n black_70_improve.append(sum(black_improve[65:70]) / 5)\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >= 75:\n black_75_improve.append(sum(black_improve[70:75]) / 5)\n else:\n black_75_improve.append(black_avg)\n if len(game_nums) > 10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums) > 20:\n game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums) > 30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums) > 40:\n game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums) > 50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums) > 60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums) > 70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums) > 80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums) > 90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums) > 100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\nchess_dict = {'game_length': game_length, 'average_score': average_score,\n 'score_stdev': score_stdev, 'largest_gain': largest_gain,\n 'largest_drop': largest_drop, 'max_score': max_score, 'min_score':\n min_score, 'ending_score': ending_score, 'white_avg_improve':\n white_avg_improve, 'black_avg_improve': black_avg_improve,\n 'white_median_improve': white_median_improve, 'black_median_improve':\n black_median_improve, 'white_q1_improve': white_q1_improve,\n 'white_q2_improve': white_q2_improve, 'white_q3_improve':\n white_q3_improve, 'white_q4_improve': 
white_q4_improve,\n 'black_q1_improve': black_q1_improve, 'black_q2_improve':\n black_q2_improve, 'black_q3_improve': black_q3_improve,\n 'black_q4_improve': black_q4_improve, 'white_5_improve':\n white_5_improve, 'white_10_improve': white_10_improve,\n 'white_15_improve': white_15_improve, 'white_20_improve':\n white_20_improve, 'white_25_improve': white_25_improve,\n 'white_30_improve': white_30_improve, 'white_35_improve':\n white_35_improve, 'white_40_improve': white_40_improve,\n 'white_45_improve': white_45_improve, 'white_50_improve':\n white_50_improve, 'white_55_improve': white_55_improve,\n 'white_60_improve': white_60_improve, 'white_65_improve':\n white_65_improve, 'white_70_improve': white_70_improve,\n 'white_75_improve': white_75_improve, 'black_5_improve':\n black_5_improve, 'black_10_improve': black_10_improve,\n 'black_15_improve': black_15_improve, 'black_20_improve':\n black_20_improve, 'black_25_improve': black_25_improve,\n 'black_30_improve': black_30_improve, 'black_35_improve':\n black_35_improve, 'black_40_improve': black_40_improve,\n 'black_45_improve': black_45_improve, 'black_50_improve':\n black_50_improve, 'black_55_improve': black_55_improve,\n 'black_60_improve': black_60_improve, 'black_65_improve':\n black_65_improve, 'black_70_improve': black_70_improve,\n 'black_75_improve': black_75_improve, 'white_q1_max': white_q1_max,\n 'white_q2_max': white_q2_max, 'white_q3_max': white_q3_max,\n 'white_q4_max': white_q4_max, 'black_q1_max': black_q1_max,\n 'black_q2_max': black_q2_max, 'black_q3_max': black_q3_max,\n 'black_q4_max': black_q4_max, 'white_q1_min': white_q1_min,\n 'white_q2_min': white_q2_min, 'white_q3_min': white_q3_min,\n 'white_q4_min': white_q4_min, 'black_q1_min': black_q1_min,\n 'black_q2_min': black_q2_min, 'black_q3_min': black_q3_min,\n 'black_q4_min': black_q4_min, 'white_q1_stdev': white_q1_stdev,\n 'white_q2_stdev': white_q2_stdev, 'white_q3_stdev': white_q3_stdev,\n 'white_q4_stdev': white_q4_stdev, 
'black_q1_stdev': black_q1_stdev,\n 'black_q2_stdev': black_q2_stdev, 'black_q3_stdev': black_q3_stdev,\n 'black_q4_stdev': black_q4_stdev, 'game_score10': game_score10,\n 'game_score20': game_score20, 'game_score30': game_score30,\n 'game_score40': game_score40, 'game_score50': game_score50,\n 'game_score60': game_score60, 'game_score70': game_score70,\n 'game_score80': game_score80, 'game_score90': game_score90,\n 'game_score100': game_score100}\nchess_df = pd.DataFrame(chess_dict, index=[x for x in range(1, 50001)])\nchess_df.index.name = 'Event'\nchess_df.to_csv('score_features.csv')\n",
"step-5": "#Script to extract features from chess score data file stockfish.csv\nimport numpy as np\nimport pandas as pd\n\n#Load in and format raw chess game scoring data\nraw_scores = [line.strip().split(\",\")[1].split() for line in open(\"stockfish.csv\")][1:]\n\n#Initialize containers for features to extract\ngame_length = []\naverage_score = []\nscore_stdev = []\nlargest_gain = []\nlargest_drop = []\nmax_score = []\nmin_score = []\nending_score = []\nwhite_avg_improve = []\nblack_avg_improve = []\nwhite_median_improve = []\nblack_median_improve = []\nwhite_q1_improve =[]\nwhite_q2_improve =[]\nwhite_q3_improve =[]\nwhite_q4_improve =[]\nblack_q1_improve =[]\nblack_q2_improve =[]\nblack_q3_improve =[]\nblack_q4_improve =[]\n\ngame_score10 = []\ngame_score20 = []\ngame_score30 = []\ngame_score40 = []\ngame_score50 = []\ngame_score60 = []\ngame_score70 = []\ngame_score80 = []\ngame_score90 = []\ngame_score100 = []\n\nwhite_q1_max =[]\nwhite_q2_max =[]\nwhite_q3_max =[]\nwhite_q4_max =[]\nblack_q1_max =[]\nblack_q2_max =[]\nblack_q3_max =[]\nblack_q4_max =[]\n\nwhite_q1_min =[]\nwhite_q2_min =[]\nwhite_q3_min =[]\nwhite_q4_min =[]\nblack_q1_min =[]\nblack_q2_min =[]\nblack_q3_min =[]\nblack_q4_min =[]\n\nwhite_q1_stdev =[]\nwhite_q2_stdev =[]\nwhite_q3_stdev =[]\nwhite_q4_stdev =[]\nblack_q1_stdev =[]\nblack_q2_stdev =[]\nblack_q3_stdev =[]\nblack_q4_stdev =[]\n\nwhite_5_improve = []\nwhite_10_improve = []\nwhite_15_improve = []\nwhite_20_improve = []\nwhite_25_improve = []\nwhite_30_improve = []\nwhite_35_improve = []\nwhite_40_improve = []\nwhite_45_improve = []\nwhite_50_improve = []\nwhite_55_improve = []\nwhite_60_improve = []\nwhite_65_improve = []\nwhite_70_improve = []\nwhite_75_improve = []\n\nblack_5_improve = []\nblack_10_improve = []\nblack_15_improve = []\nblack_20_improve = []\nblack_25_improve = []\nblack_30_improve = []\nblack_35_improve = []\nblack_40_improve = []\nblack_45_improve = []\nblack_50_improve = []\nblack_55_improve = 
[]\nblack_60_improve = []\nblack_65_improve = []\nblack_70_improve = []\nblack_75_improve = []\n\n\n#Loop through game data, calculate and append new features to feature containers\nfor game in raw_scores:\n game_len = len(game)+1 # Add 1 to game length to avoid divide by zero errors caused by empty games\n total = 0\n prev = None\n player = 1\n max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != \"NA\":\n score = int(score)\n game_nums.append(score)\n total+=score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player-1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n\n #Add computed values to feature containers\n white_avg = sum(white_improve)/(game_len/2)\n black_avg = sum(black_improve)/(game_len/2)\n game_length.append(game_len)\n average_score.append(total/game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve)//2])\n black_median_improve.append(sorted(black_improve)[len(black_improve)//2])\n\n white_q1_improve.append( sum(white_improve[0:len(white_improve)//4])/len(white_improve)//4 )\n white_q2_improve.append( sum(white_improve[len(white_improve)//4 : (len(white_improve)//4)*2])/len(white_improve)//4 )\n white_q3_improve.append( sum(white_improve[(len(white_improve)//4)*2 : (len(white_improve)//4)*3])/len(white_improve)//4 )\n white_q4_improve.append( sum(white_improve[(len(white_improve)//4)*3 : ])/len(white_improve)//4 )\n 
black_q1_improve.append( sum(black_improve[0:len(black_improve)//4])/len(black_improve)//4 )\n black_q2_improve.append( sum(black_improve[len(black_improve)//4 : (len(black_improve)//4)*2])/len(black_improve)//4 )\n black_q3_improve.append( sum(black_improve[(len(black_improve)//4)*2 : (len(black_improve)//4)*3])/len(black_improve)//4 )\n black_q4_improve.append( sum(black_improve[(len(black_improve)//4)*3 : ])/len(black_improve)//4 )\n\n white_q1_max.append(max(white_improve[0:1+len(white_improve)//4]))\n white_q2_max.append(max(white_improve[len(white_improve)//4 : 1+(len(white_improve)//4)*2]))\n white_q3_max.append(max(white_improve[(len(white_improve)//4)*2 : 1+(len(white_improve)//4)*3]))\n white_q4_max.append(max(white_improve[(len(white_improve)//4)*3 : ]))\n black_q1_max.append(max(black_improve[0:1+len(black_improve)//4]))\n black_q2_max.append(max(black_improve[len(black_improve)//4 : 1+(len(black_improve)//4)*2]))\n black_q3_max.append(max(black_improve[(len(black_improve)//4)*2 : 1+(len(black_improve)//4)*3]))\n black_q4_max.append(max(black_improve[(len(black_improve)//4)*3 : ]))\n\n white_q1_min.append(min(white_improve[0:1+len(white_improve)//4]))\n white_q2_min.append(min(white_improve[len(white_improve)//4 : 1+(len(white_improve)//4)*2]))\n white_q3_min.append(min(white_improve[(len(white_improve)//4)*2 : 1+(len(white_improve)//4)*3]))\n white_q4_min.append(min(white_improve[(len(white_improve)//4)*3 : ]))\n black_q1_min.append(min(black_improve[0:1+len(black_improve)//4]))\n black_q2_min.append(min(black_improve[len(black_improve)//4 : 1+(len(black_improve)//4)*2]))\n black_q3_min.append(min(black_improve[(len(black_improve)//4)*2 : 1+(len(black_improve)//4)*3]))\n black_q4_min.append(min(black_improve[(len(black_improve)//4)*3 : ]))\n\n white_q1_stdev.append(np.std(np.array((white_improve[0:len(white_improve)//4]))))\n white_q2_stdev.append(np.std(np.array((white_improve[len(white_improve)//4 : (len(white_improve)//4)*2]))))\n 
white_q3_stdev.append(np.std(np.array((white_improve[(len(white_improve)//4)*2 : (len(white_improve)//4)*3]))))\n white_q4_stdev.append(np.std(np.array((white_improve[(len(white_improve)//4)*3 : ]))))\n black_q1_stdev.append(np.std(np.array((black_improve[0:len(black_improve)//4]))))\n black_q2_stdev.append(np.std(np.array((black_improve[len(black_improve)//4 : (len(black_improve)//4)*2]))))\n black_q3_stdev.append(np.std(np.array((black_improve[(len(black_improve)//4)*2 : (len(black_improve)//4)*3]))))\n black_q4_stdev.append(np.std(np.array((black_improve[(len(black_improve)//4)*3 : ]))))\n\n if len(white_improve) >=5:\n white_5_improve.append( sum(white_improve[0:5])/5 )\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >=10:\n white_10_improve.append( sum(white_improve[5:10])/5 )\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >=15:\n white_15_improve.append( sum(white_improve[10:15])/5 )\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >=20:\n white_20_improve.append( sum(white_improve[15:20])/5 )\n else:\n white_20_improve.append(white_avg)\n if len(white_improve) >=25:\n white_25_improve.append( sum(white_improve[20:25])/5 )\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >=30:\n white_30_improve.append( sum(white_improve[25:30])/5 )\n else:\n white_30_improve.append(white_avg)\n if len(white_improve) >=35:\n white_35_improve.append( sum(white_improve[30:35])/5 )\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >=40:\n white_40_improve.append( sum(white_improve[35:40])/5 )\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >=45:\n white_45_improve.append( sum(white_improve[40:45])/5 )\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >=50:\n white_50_improve.append( sum(white_improve[45:50])/5 )\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >=55:\n white_55_improve.append( sum(white_improve[50:55])/5 
)\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >=60:\n white_60_improve.append( sum(white_improve[55:60])/5 )\n else:\n white_60_improve.append(white_avg)\n if len(white_improve) >=65:\n white_65_improve.append( sum(white_improve[60:65])/5 )\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >=70:\n white_70_improve.append( sum(white_improve[65:70])/5 )\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >=75:\n white_75_improve.append( sum(white_improve[70:75])/5 )\n else:\n white_75_improve.append(white_avg)\n\n if len(black_improve) >=5:\n black_5_improve.append( sum(black_improve[0:5])/5 )\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >=10:\n black_10_improve.append( sum(black_improve[5:10])/5 )\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >=15:\n black_15_improve.append( sum(black_improve[10:15])/5 )\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >=20:\n black_20_improve.append( sum(black_improve[15:20])/5 )\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >=25:\n black_25_improve.append( sum(black_improve[20:25])/5 )\n else:\n black_25_improve.append(black_avg)\n if len(black_improve) >=30:\n black_30_improve.append( sum(black_improve[25:30])/5 )\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >=35:\n black_35_improve.append( sum(black_improve[30:35])/5 )\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >=40:\n black_40_improve.append( sum(black_improve[35:40])/5 )\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >=45:\n black_45_improve.append( sum(black_improve[40:45])/5 )\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >=50:\n black_50_improve.append( sum(black_improve[45:50])/5 )\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >=55:\n black_55_improve.append( sum(black_improve[50:55])/5 )\n else:\n 
black_55_improve.append(black_avg)\n if len(black_improve) >=60:\n black_60_improve.append( sum(black_improve[55:60])/5 )\n else:\n black_60_improve.append(black_avg)\n if len(black_improve) >=65:\n black_65_improve.append( sum(black_improve[60:65])/5 )\n else:\n black_65_improve.append(black_avg)\n if len(black_improve) >=70:\n black_70_improve.append( sum(black_improve[65:70])/5 )\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >=75:\n black_75_improve.append( sum(black_improve[70:75])/5 )\n else:\n black_75_improve.append(black_avg)\n\n if len(game_nums)>10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums)>20:\n game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums)>30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums)>40:\n game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums)>50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums)>60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums)>70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums)>80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums)>90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums)>100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\n\nchess_dict = {\"game_length\":game_length,\"average_score\":average_score,\"score_stdev\":score_stdev,\"largest_gain\":largest_gain,\n \"largest_drop\":largest_drop,\"max_score\":max_score,\"min_score\":min_score,\n \"ending_score\":ending_score, \"white_avg_improve\":white_avg_improve,\n \"black_avg_improve\":black_avg_improve,\"white_median_improve\":white_median_improve,\n 
\"black_median_improve\":black_median_improve,\"white_q1_improve\":white_q1_improve,\n \"white_q2_improve\":white_q2_improve,\n \"white_q3_improve\":white_q3_improve,\n \"white_q4_improve\":white_q4_improve,\"black_q1_improve\":black_q1_improve,\n \"black_q2_improve\":black_q2_improve,\n \"black_q3_improve\":black_q3_improve,\n \"black_q4_improve\":black_q4_improve,\n 'white_5_improve': white_5_improve,\n 'white_10_improve': white_10_improve,\n 'white_15_improve': white_15_improve,\n 'white_20_improve': white_20_improve,\n 'white_25_improve': white_25_improve,\n 'white_30_improve': white_30_improve,\n 'white_35_improve': white_35_improve,\n 'white_40_improve': white_40_improve,\n 'white_45_improve': white_45_improve,\n 'white_50_improve': white_50_improve,\n 'white_55_improve': white_55_improve,\n 'white_60_improve': white_60_improve,\n 'white_65_improve': white_65_improve,\n 'white_70_improve': white_70_improve,\n 'white_75_improve': white_75_improve,\n 'black_5_improve': black_5_improve,\n 'black_10_improve': black_10_improve,\n 'black_15_improve': black_15_improve,\n 'black_20_improve': black_20_improve,\n 'black_25_improve': black_25_improve,\n 'black_30_improve': black_30_improve,\n 'black_35_improve': black_35_improve,\n 'black_40_improve': black_40_improve,\n 'black_45_improve': black_45_improve,\n 'black_50_improve': black_50_improve,\n 'black_55_improve': black_55_improve,\n 'black_60_improve': black_60_improve,\n 'black_65_improve': black_65_improve,\n 'black_70_improve': black_70_improve,\n 'black_75_improve': black_75_improve,\n\n 'white_q1_max': white_q1_max,\n 'white_q2_max': white_q2_max,\n 'white_q3_max': white_q3_max,\n 'white_q4_max': white_q4_max,\n 'black_q1_max': black_q1_max,\n 'black_q2_max': black_q2_max,\n 'black_q3_max': black_q3_max,\n 'black_q4_max': black_q4_max,\n\n 'white_q1_min': white_q1_min,\n 'white_q2_min': white_q2_min,\n 'white_q3_min': white_q3_min,\n 'white_q4_min': white_q4_min,\n 'black_q1_min': black_q1_min,\n 
'black_q2_min': black_q2_min,\n 'black_q3_min': black_q3_min,\n 'black_q4_min': black_q4_min,\n\n 'white_q1_stdev': white_q1_stdev,\n 'white_q2_stdev': white_q2_stdev,\n 'white_q3_stdev': white_q3_stdev,\n 'white_q4_stdev': white_q4_stdev,\n 'black_q1_stdev': black_q1_stdev,\n 'black_q2_stdev': black_q2_stdev,\n 'black_q3_stdev': black_q3_stdev,\n 'black_q4_stdev': black_q4_stdev,\n\n 'game_score10':game_score10,\n 'game_score20':game_score20,\n 'game_score30':game_score30,\n 'game_score40':game_score40,\n 'game_score50':game_score50,\n 'game_score60':game_score60,\n 'game_score70':game_score70,\n 'game_score80':game_score80,\n 'game_score90':game_score90,\n 'game_score100':game_score100\n}\n\n#Create feature data frame\nchess_df = pd.DataFrame(chess_dict, index=[x for x in range(1,50001)])\nchess_df.index.name = \"Event\"\n\n#Write the new feature data frame to CSV\nchess_df.to_csv(\"score_features.csv\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import io
from flask import Flask, send_file
app = Flask(__name__)
@app.route('/')
def index():
buf = io.BytesIO()
buf.write('hello world')
buf.seek(0)
return send_file(buf,
attachment_filename="testing.txt",
as_attachment=True)
|
normal
|
{
"blob_id": "362c4e572f0fe61b77e54ab5608d4cd052291da4",
"index": 4043,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf, attachment_filename='testing.txt', as_attachment=True\n )\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf, attachment_filename='testing.txt', as_attachment=True\n )\n",
"step-4": "import io\nfrom flask import Flask, send_file\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf, attachment_filename='testing.txt', as_attachment=True\n )\n",
"step-5": "import io\n\nfrom flask import Flask, send_file\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n buf = io.BytesIO()\n buf.write('hello world')\n buf.seek(0)\n return send_file(buf,\n attachment_filename=\"testing.txt\",\n as_attachment=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Apigee(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Apigee(object):
<|reserved_special_token_0|>
def __init__(self, org_name, username, password):
self.proxies = Proxies(org_name, username, password)
self.roles = Roles(org_name, username, password)
self.products = Products(org_name, username, password)
self.resourcefiles = ResourceFiles(org_name, username, password,
environment)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Apigee(object):
"""Provides easy access to all endpoint classes
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
"""
def __init__(self, org_name, username, password):
self.proxies = Proxies(org_name, username, password)
self.roles = Roles(org_name, username, password)
self.products = Products(org_name, username, password)
self.resourcefiles = ResourceFiles(org_name, username, password,
environment)
<|reserved_special_token_1|>
from .proxies import Proxies
from .roles import Roles
from .products import Products
from .resourcefiles import ResourceFiles
class Apigee(object):
"""Provides easy access to all endpoint classes
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
"""
def __init__(self, org_name, username, password):
self.proxies = Proxies(org_name, username, password)
self.roles = Roles(org_name, username, password)
self.products = Products(org_name, username, password)
self.resourcefiles = ResourceFiles(org_name, username, password,
environment)
|
flexible
|
{
"blob_id": "656927013d9a0254e2bc4cdf05b7cfd5947feb05",
"index": 7868,
"step-1": "<mask token>\n\n\nclass Apigee(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Apigee(object):\n <mask token>\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-3": "<mask token>\n\n\nclass Apigee(object):\n \"\"\"Provides easy access to all endpoint classes\n\n Args:\n domain (str): Your Auth0 domain, e.g: 'username.auth0.com'\n\n token (str): Management API v2 Token\n \"\"\"\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-4": "from .proxies import Proxies\nfrom .roles import Roles\nfrom .products import Products\nfrom .resourcefiles import ResourceFiles\n\n\nclass Apigee(object):\n \"\"\"Provides easy access to all endpoint classes\n\n Args:\n domain (str): Your Auth0 domain, e.g: 'username.auth0.com'\n\n token (str): Management API v2 Token\n \"\"\"\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from collections import Counter
from copy import deepcopy
from itertools import count
from traceback import print_exc
#https://www.websudoku.com/?level=4
class SudukoBoard:
side=3
sz=side*side
class Cell:
def __init__(self,board,row,col):
self._values= [None] * SudukoBoard.sz
self._value=None
self.sets=[]
self.row=row
self.col=col
self.open=SudukoBoard.sz
self.board=board
def add_set(self,set):
self.sets.append(set)
@property
def value(self):
return self._value
@value.setter
def value(self,value):
if self._value is not None and self._value!=value:
raise ValueError("Conflicting value for cell",self.row,self.col,self._value,value)
if self._value != value:
self._value=value
self._values=[False]*SudukoBoard.sz
self._values[value-1]=True
self.open=0
self.board.open-=1
for s in self.sets:
for c in s.entries:
if c!=self:
c.cantbe(value)
def cantbe(self, value):
if self._values[value - 1] == True:
raise ValueError("Conflicting cant be for cell, already set",self.row,self.col,self._value,value)
if self._values[value-1] != False:
self._values[value-1]=False
self.open -=1
cnt=0
nidx=None
for idx,v in enumerate(self._values):
if v is None:
cnt+=1
nidx=idx
if cnt==1:
self.value=nidx+1
def couldbe(self, value):
return self._values[value - 1]
def couldbelist(self):
return [idx+1 for idx,x in enumerate(self._values) if x is None]
class Set:
def __init__(self):
self.entries=[]
def add_cell(self,cell):
self.entries.append(cell)
cell.add_set(self)
def update(self,entry):
value=entry.value
for other in self.entries:
if other==entry:
continue
if other.value == value:
raise Exception("Illegal value")
else:
other.value=not value
def __init__(self):
self.initial=0
self.open=SudukoBoard.sz**2
self.cells=[]
self.rows=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.cols=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.blks=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]
s3=SudukoBoard.side*SudukoBoard.sz
for i in range(SudukoBoard.sz**2):
cell=SudukoBoard.Cell(self,i//SudukoBoard.sz,i%SudukoBoard.sz)
self.cells.append(cell)
for cell in self.cells:
self.rows[cell.row].add_cell(cell)
self.cols[cell.col].add_cell(cell)
self.blks[(cell.row)//SudukoBoard.side+((cell.col)//SudukoBoard.side)*SudukoBoard.side].add_cell(cell)
def setup(self,txt):
trows=txt.split(",")
if len(trows)!=SudukoBoard.sz:
raise Exception("Incorrect number of rows")
cnt=0
for ridx,trow in enumerate(trows):
if len(trows) != SudukoBoard.sz:
raise Exception("Incorrect number of columns row ",ridx)
for cidx,c in enumerate(trow):
if c != '.':
v=int(c)
cnt+=1
self.set(ridx,cidx,v)
# print("Set ",ridx+1,cidx+1, " tot ",cnt," left ",self.open,
# " auto ",SudukoBoard.sz**2-self.open-cnt)
# self.print()
def set(self,row,col,value):
self.rows[row].entries[col].value=value
def print(self):
for ridx,r in enumerate(self.rows):
for cidx,c in enumerate(r.entries):
print("." if c.value is None else c.value,end='')
if (cidx+1)%SudukoBoard.side == 0:
print("|",end='')
print()
if (ridx+1)%SudukoBoard.side == 0:
print("{}".format("-"*(SudukoBoard.sz+SudukoBoard.side)))
def solve(self,depth=0,guesses=[]):
for i in range(1000):
print("Iteration ",depth,i)
# for c in self.cells:
# print(c.row,c.col,c.couldbelist(),c._value,c._values)
open=[Counter([len(c.couldbelist()) for c in self.cells])]
print("open cells",open)
for c in self.cells:
if c.open!=1:
continue
if c.open != len(c.couldbelist()):
pass
value=c.couldbelist()
c.set(value)
if self.open >0 and not 1 in open:
print("We have to guess depth {} and {} cells open".format(depth,self.open))
bestguess=[]
for c in self.cells:
for guess in c.couldbelist():
other=deepcopy(self)
try:
other.set(c.row,c.col,guess)
bestguess.append((other.open,(c.row,c.col,guess)))
except ValueError as e:
pass
except Exception as e:
print_exc()
for open,(row,col,guess) in sorted(bestguess):
print("Best guess ",row,col,guess,depth)
other = deepcopy(self)
other.set(row,col,guess)
soln,soln_guesses = other.solve(depth + 1,guesses+[(row,col,guess)])
if soln.open == 0:
print("guess return")
return soln,soln_guesses
# if self.open == 0:
# print("Solved with {} guesses {}".format(depth,guesses))
# self.print()
return self,guesses
def leftopen(self):
cnt=0
for c in self.cells:
if c.value is None:
cnt+=1
if cnt != self.open:
assert "BAD"
return cnt
if __name__ == "__main__":
board=SudukoBoard()
evil="..1.4..6.,...8...2.,..4..9.3.,.48..76..,5.......9,..25..47.,.8.1..2..,.5...6...,.6..9.1.."
evil2="..9..3.14,....96...,.2....9..,..8.....1,..12784..,6.....7..,..7....4.,...93....,46.8..3.."
medium="8.4.7.6.5,....8237.,7......1.,35...8...,....9....,...4...61,.3......7,.9571....,4.6.3.1.2"
hard="......1..,7..4.18..,..375..4.,4.1.7....,.9..8..7.,....9.6.5,.6..129..,..45.6..2,..2......"
easy=".7.4..2..,2..5791..,.4......6,..261.35.,631...427,.54.328..,5......3.,..6157..4,..8..6.1."
board.setup(evil2)
board.print()
print()
soln,guesses=board.solve()
print("Final : guesses",guesses)
soln.print()
pass
|
normal
|
{
"blob_id": "44d9e628e31cdb36088b969da2f6e9af1b1d3efe",
"index": 7841,
"step-1": "<mask token>\n\n\nclass SudukoBoard:\n <mask token>\n <mask token>\n\n\n class Cell:\n\n def __init__(self, board, row, col):\n self._values = [None] * SudukoBoard.sz\n self._value = None\n self.sets = []\n self.row = row\n self.col = col\n self.open = SudukoBoard.sz\n self.board = board\n\n def add_set(self, set):\n self.sets.append(set)\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n if self._value is not None and self._value != value:\n raise ValueError('Conflicting value for cell', self.row,\n self.col, self._value, value)\n if self._value != value:\n self._value = value\n self._values = [False] * SudukoBoard.sz\n self._values[value - 1] = True\n self.open = 0\n self.board.open -= 1\n for s in self.sets:\n for c in s.entries:\n if c != self:\n c.cantbe(value)\n\n def cantbe(self, value):\n if self._values[value - 1] == True:\n raise ValueError('Conflicting cant be for cell, already set',\n self.row, self.col, self._value, value)\n if self._values[value - 1] != False:\n self._values[value - 1] = False\n self.open -= 1\n cnt = 0\n nidx = None\n for idx, v in enumerate(self._values):\n if v is None:\n cnt += 1\n nidx = idx\n if cnt == 1:\n self.value = nidx + 1\n\n def couldbe(self, value):\n return self._values[value - 1]\n\n def couldbelist(self):\n return [(idx + 1) for idx, x in enumerate(self._values) if x is\n None]\n\n\n class Set:\n\n def __init__(self):\n self.entries = []\n\n def add_cell(self, cell):\n self.entries.append(cell)\n cell.add_set(self)\n\n def update(self, entry):\n value = entry.value\n for other in self.entries:\n if other == entry:\n continue\n if other.value == value:\n raise Exception('Illegal value')\n else:\n other.value = not value\n\n def __init__(self):\n self.initial = 0\n self.open = SudukoBoard.sz ** 2\n self.cells = []\n self.rows = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.blks = 
[SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n s3 = SudukoBoard.side * SudukoBoard.sz\n for i in range(SudukoBoard.sz ** 2):\n cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %\n SudukoBoard.sz)\n self.cells.append(cell)\n for cell in self.cells:\n self.rows[cell.row].add_cell(cell)\n self.cols[cell.col].add_cell(cell)\n self.blks[cell.row // SudukoBoard.side + cell.col //\n SudukoBoard.side * SudukoBoard.side].add_cell(cell)\n\n def setup(self, txt):\n trows = txt.split(',')\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of rows')\n cnt = 0\n for ridx, trow in enumerate(trows):\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of columns row ', ridx)\n for cidx, c in enumerate(trow):\n if c != '.':\n v = int(c)\n cnt += 1\n self.set(ridx, cidx, v)\n\n def set(self, row, col, value):\n self.rows[row].entries[col].value = value\n <mask token>\n\n def solve(self, depth=0, guesses=[]):\n for i in range(1000):\n print('Iteration ', depth, i)\n open = [Counter([len(c.couldbelist()) for c in self.cells])]\n print('open cells', open)\n for c in self.cells:\n if c.open != 1:\n continue\n if c.open != len(c.couldbelist()):\n pass\n value = c.couldbelist()\n c.set(value)\n if self.open > 0 and not 1 in open:\n print('We have to guess depth {} and {} cells open'.format(\n depth, self.open))\n bestguess = []\n for c in self.cells:\n for guess in c.couldbelist():\n other = deepcopy(self)\n try:\n other.set(c.row, c.col, guess)\n bestguess.append((other.open, (c.row, c.col,\n guess)))\n except ValueError as e:\n pass\n except Exception as e:\n print_exc()\n for open, (row, col, guess) in sorted(bestguess):\n print('Best guess ', row, col, guess, depth)\n other = deepcopy(self)\n other.set(row, col, guess)\n soln, soln_guesses = other.solve(depth + 1, guesses + [\n (row, col, guess)])\n if soln.open == 0:\n print('guess return')\n return soln, soln_guesses\n return self, guesses\n\n def leftopen(self):\n cnt = 0\n for c in 
self.cells:\n if c.value is None:\n cnt += 1\n if cnt != self.open:\n assert 'BAD'\n return cnt\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SudukoBoard:\n side = 3\n sz = side * side\n\n\n class Cell:\n\n def __init__(self, board, row, col):\n self._values = [None] * SudukoBoard.sz\n self._value = None\n self.sets = []\n self.row = row\n self.col = col\n self.open = SudukoBoard.sz\n self.board = board\n\n def add_set(self, set):\n self.sets.append(set)\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n if self._value is not None and self._value != value:\n raise ValueError('Conflicting value for cell', self.row,\n self.col, self._value, value)\n if self._value != value:\n self._value = value\n self._values = [False] * SudukoBoard.sz\n self._values[value - 1] = True\n self.open = 0\n self.board.open -= 1\n for s in self.sets:\n for c in s.entries:\n if c != self:\n c.cantbe(value)\n\n def cantbe(self, value):\n if self._values[value - 1] == True:\n raise ValueError('Conflicting cant be for cell, already set',\n self.row, self.col, self._value, value)\n if self._values[value - 1] != False:\n self._values[value - 1] = False\n self.open -= 1\n cnt = 0\n nidx = None\n for idx, v in enumerate(self._values):\n if v is None:\n cnt += 1\n nidx = idx\n if cnt == 1:\n self.value = nidx + 1\n\n def couldbe(self, value):\n return self._values[value - 1]\n\n def couldbelist(self):\n return [(idx + 1) for idx, x in enumerate(self._values) if x is\n None]\n\n\n class Set:\n\n def __init__(self):\n self.entries = []\n\n def add_cell(self, cell):\n self.entries.append(cell)\n cell.add_set(self)\n\n def update(self, entry):\n value = entry.value\n for other in self.entries:\n if other == entry:\n continue\n if other.value == value:\n raise Exception('Illegal value')\n else:\n other.value = not value\n\n def __init__(self):\n self.initial = 0\n self.open = SudukoBoard.sz ** 2\n self.cells = []\n self.rows = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.blks = 
[SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n s3 = SudukoBoard.side * SudukoBoard.sz\n for i in range(SudukoBoard.sz ** 2):\n cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %\n SudukoBoard.sz)\n self.cells.append(cell)\n for cell in self.cells:\n self.rows[cell.row].add_cell(cell)\n self.cols[cell.col].add_cell(cell)\n self.blks[cell.row // SudukoBoard.side + cell.col //\n SudukoBoard.side * SudukoBoard.side].add_cell(cell)\n\n def setup(self, txt):\n trows = txt.split(',')\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of rows')\n cnt = 0\n for ridx, trow in enumerate(trows):\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of columns row ', ridx)\n for cidx, c in enumerate(trow):\n if c != '.':\n v = int(c)\n cnt += 1\n self.set(ridx, cidx, v)\n\n def set(self, row, col, value):\n self.rows[row].entries[col].value = value\n\n def print(self):\n for ridx, r in enumerate(self.rows):\n for cidx, c in enumerate(r.entries):\n print('.' 
if c.value is None else c.value, end='')\n if (cidx + 1) % SudukoBoard.side == 0:\n print('|', end='')\n print()\n if (ridx + 1) % SudukoBoard.side == 0:\n print('{}'.format('-' * (SudukoBoard.sz + SudukoBoard.side)))\n\n def solve(self, depth=0, guesses=[]):\n for i in range(1000):\n print('Iteration ', depth, i)\n open = [Counter([len(c.couldbelist()) for c in self.cells])]\n print('open cells', open)\n for c in self.cells:\n if c.open != 1:\n continue\n if c.open != len(c.couldbelist()):\n pass\n value = c.couldbelist()\n c.set(value)\n if self.open > 0 and not 1 in open:\n print('We have to guess depth {} and {} cells open'.format(\n depth, self.open))\n bestguess = []\n for c in self.cells:\n for guess in c.couldbelist():\n other = deepcopy(self)\n try:\n other.set(c.row, c.col, guess)\n bestguess.append((other.open, (c.row, c.col,\n guess)))\n except ValueError as e:\n pass\n except Exception as e:\n print_exc()\n for open, (row, col, guess) in sorted(bestguess):\n print('Best guess ', row, col, guess, depth)\n other = deepcopy(self)\n other.set(row, col, guess)\n soln, soln_guesses = other.solve(depth + 1, guesses + [\n (row, col, guess)])\n if soln.open == 0:\n print('guess return')\n return soln, soln_guesses\n return self, guesses\n\n def leftopen(self):\n cnt = 0\n for c in self.cells:\n if c.value is None:\n cnt += 1\n if cnt != self.open:\n assert 'BAD'\n return cnt\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SudukoBoard:\n side = 3\n sz = side * side\n\n\n class Cell:\n\n def __init__(self, board, row, col):\n self._values = [None] * SudukoBoard.sz\n self._value = None\n self.sets = []\n self.row = row\n self.col = col\n self.open = SudukoBoard.sz\n self.board = board\n\n def add_set(self, set):\n self.sets.append(set)\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n if self._value is not None and self._value != value:\n raise ValueError('Conflicting value for cell', self.row,\n self.col, self._value, value)\n if self._value != value:\n self._value = value\n self._values = [False] * SudukoBoard.sz\n self._values[value - 1] = True\n self.open = 0\n self.board.open -= 1\n for s in self.sets:\n for c in s.entries:\n if c != self:\n c.cantbe(value)\n\n def cantbe(self, value):\n if self._values[value - 1] == True:\n raise ValueError('Conflicting cant be for cell, already set',\n self.row, self.col, self._value, value)\n if self._values[value - 1] != False:\n self._values[value - 1] = False\n self.open -= 1\n cnt = 0\n nidx = None\n for idx, v in enumerate(self._values):\n if v is None:\n cnt += 1\n nidx = idx\n if cnt == 1:\n self.value = nidx + 1\n\n def couldbe(self, value):\n return self._values[value - 1]\n\n def couldbelist(self):\n return [(idx + 1) for idx, x in enumerate(self._values) if x is\n None]\n\n\n class Set:\n\n def __init__(self):\n self.entries = []\n\n def add_cell(self, cell):\n self.entries.append(cell)\n cell.add_set(self)\n\n def update(self, entry):\n value = entry.value\n for other in self.entries:\n if other == entry:\n continue\n if other.value == value:\n raise Exception('Illegal value')\n else:\n other.value = not value\n\n def __init__(self):\n self.initial = 0\n self.open = SudukoBoard.sz ** 2\n self.cells = []\n self.rows = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.blks = 
[SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n s3 = SudukoBoard.side * SudukoBoard.sz\n for i in range(SudukoBoard.sz ** 2):\n cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %\n SudukoBoard.sz)\n self.cells.append(cell)\n for cell in self.cells:\n self.rows[cell.row].add_cell(cell)\n self.cols[cell.col].add_cell(cell)\n self.blks[cell.row // SudukoBoard.side + cell.col //\n SudukoBoard.side * SudukoBoard.side].add_cell(cell)\n\n def setup(self, txt):\n trows = txt.split(',')\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of rows')\n cnt = 0\n for ridx, trow in enumerate(trows):\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of columns row ', ridx)\n for cidx, c in enumerate(trow):\n if c != '.':\n v = int(c)\n cnt += 1\n self.set(ridx, cidx, v)\n\n def set(self, row, col, value):\n self.rows[row].entries[col].value = value\n\n def print(self):\n for ridx, r in enumerate(self.rows):\n for cidx, c in enumerate(r.entries):\n print('.' 
if c.value is None else c.value, end='')\n if (cidx + 1) % SudukoBoard.side == 0:\n print('|', end='')\n print()\n if (ridx + 1) % SudukoBoard.side == 0:\n print('{}'.format('-' * (SudukoBoard.sz + SudukoBoard.side)))\n\n def solve(self, depth=0, guesses=[]):\n for i in range(1000):\n print('Iteration ', depth, i)\n open = [Counter([len(c.couldbelist()) for c in self.cells])]\n print('open cells', open)\n for c in self.cells:\n if c.open != 1:\n continue\n if c.open != len(c.couldbelist()):\n pass\n value = c.couldbelist()\n c.set(value)\n if self.open > 0 and not 1 in open:\n print('We have to guess depth {} and {} cells open'.format(\n depth, self.open))\n bestguess = []\n for c in self.cells:\n for guess in c.couldbelist():\n other = deepcopy(self)\n try:\n other.set(c.row, c.col, guess)\n bestguess.append((other.open, (c.row, c.col,\n guess)))\n except ValueError as e:\n pass\n except Exception as e:\n print_exc()\n for open, (row, col, guess) in sorted(bestguess):\n print('Best guess ', row, col, guess, depth)\n other = deepcopy(self)\n other.set(row, col, guess)\n soln, soln_guesses = other.solve(depth + 1, guesses + [\n (row, col, guess)])\n if soln.open == 0:\n print('guess return')\n return soln, soln_guesses\n return self, guesses\n\n def leftopen(self):\n cnt = 0\n for c in self.cells:\n if c.value is None:\n cnt += 1\n if cnt != self.open:\n assert 'BAD'\n return cnt\n\n\nif __name__ == '__main__':\n board = SudukoBoard()\n evil = (\n '..1.4..6.,...8...2.,..4..9.3.,.48..76..,5.......9,..25..47.,.8.1..2..,.5...6...,.6..9.1..'\n )\n evil2 = (\n '..9..3.14,....96...,.2....9..,..8.....1,..12784..,6.....7..,..7....4.,...93....,46.8..3..'\n )\n medium = (\n '8.4.7.6.5,....8237.,7......1.,35...8...,....9....,...4...61,.3......7,.9571....,4.6.3.1.2'\n )\n hard = (\n '......1..,7..4.18..,..375..4.,4.1.7....,.9..8..7.,....9.6.5,.6..129..,..45.6..2,..2......'\n )\n easy = (\n 
'.7.4..2..,2..5791..,.4......6,..261.35.,631...427,.54.328..,5......3.,..6157..4,..8..6.1.'\n )\n board.setup(evil2)\n board.print()\n print()\n soln, guesses = board.solve()\n print('Final : guesses', guesses)\n soln.print()\n pass\n",
"step-4": "from collections import Counter\nfrom copy import deepcopy\nfrom itertools import count\nfrom traceback import print_exc\n\n\nclass SudukoBoard:\n side = 3\n sz = side * side\n\n\n class Cell:\n\n def __init__(self, board, row, col):\n self._values = [None] * SudukoBoard.sz\n self._value = None\n self.sets = []\n self.row = row\n self.col = col\n self.open = SudukoBoard.sz\n self.board = board\n\n def add_set(self, set):\n self.sets.append(set)\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n if self._value is not None and self._value != value:\n raise ValueError('Conflicting value for cell', self.row,\n self.col, self._value, value)\n if self._value != value:\n self._value = value\n self._values = [False] * SudukoBoard.sz\n self._values[value - 1] = True\n self.open = 0\n self.board.open -= 1\n for s in self.sets:\n for c in s.entries:\n if c != self:\n c.cantbe(value)\n\n def cantbe(self, value):\n if self._values[value - 1] == True:\n raise ValueError('Conflicting cant be for cell, already set',\n self.row, self.col, self._value, value)\n if self._values[value - 1] != False:\n self._values[value - 1] = False\n self.open -= 1\n cnt = 0\n nidx = None\n for idx, v in enumerate(self._values):\n if v is None:\n cnt += 1\n nidx = idx\n if cnt == 1:\n self.value = nidx + 1\n\n def couldbe(self, value):\n return self._values[value - 1]\n\n def couldbelist(self):\n return [(idx + 1) for idx, x in enumerate(self._values) if x is\n None]\n\n\n class Set:\n\n def __init__(self):\n self.entries = []\n\n def add_cell(self, cell):\n self.entries.append(cell)\n cell.add_set(self)\n\n def update(self, entry):\n value = entry.value\n for other in self.entries:\n if other == entry:\n continue\n if other.value == value:\n raise Exception('Illegal value')\n else:\n other.value = not value\n\n def __init__(self):\n self.initial = 0\n self.open = SudukoBoard.sz ** 2\n self.cells = []\n self.rows = [SudukoBoard.Set() for i 
in range(SudukoBoard.sz)]\n self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.blks = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n s3 = SudukoBoard.side * SudukoBoard.sz\n for i in range(SudukoBoard.sz ** 2):\n cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %\n SudukoBoard.sz)\n self.cells.append(cell)\n for cell in self.cells:\n self.rows[cell.row].add_cell(cell)\n self.cols[cell.col].add_cell(cell)\n self.blks[cell.row // SudukoBoard.side + cell.col //\n SudukoBoard.side * SudukoBoard.side].add_cell(cell)\n\n def setup(self, txt):\n trows = txt.split(',')\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of rows')\n cnt = 0\n for ridx, trow in enumerate(trows):\n if len(trows) != SudukoBoard.sz:\n raise Exception('Incorrect number of columns row ', ridx)\n for cidx, c in enumerate(trow):\n if c != '.':\n v = int(c)\n cnt += 1\n self.set(ridx, cidx, v)\n\n def set(self, row, col, value):\n self.rows[row].entries[col].value = value\n\n def print(self):\n for ridx, r in enumerate(self.rows):\n for cidx, c in enumerate(r.entries):\n print('.' 
if c.value is None else c.value, end='')\n if (cidx + 1) % SudukoBoard.side == 0:\n print('|', end='')\n print()\n if (ridx + 1) % SudukoBoard.side == 0:\n print('{}'.format('-' * (SudukoBoard.sz + SudukoBoard.side)))\n\n def solve(self, depth=0, guesses=[]):\n for i in range(1000):\n print('Iteration ', depth, i)\n open = [Counter([len(c.couldbelist()) for c in self.cells])]\n print('open cells', open)\n for c in self.cells:\n if c.open != 1:\n continue\n if c.open != len(c.couldbelist()):\n pass\n value = c.couldbelist()\n c.set(value)\n if self.open > 0 and not 1 in open:\n print('We have to guess depth {} and {} cells open'.format(\n depth, self.open))\n bestguess = []\n for c in self.cells:\n for guess in c.couldbelist():\n other = deepcopy(self)\n try:\n other.set(c.row, c.col, guess)\n bestguess.append((other.open, (c.row, c.col,\n guess)))\n except ValueError as e:\n pass\n except Exception as e:\n print_exc()\n for open, (row, col, guess) in sorted(bestguess):\n print('Best guess ', row, col, guess, depth)\n other = deepcopy(self)\n other.set(row, col, guess)\n soln, soln_guesses = other.solve(depth + 1, guesses + [\n (row, col, guess)])\n if soln.open == 0:\n print('guess return')\n return soln, soln_guesses\n return self, guesses\n\n def leftopen(self):\n cnt = 0\n for c in self.cells:\n if c.value is None:\n cnt += 1\n if cnt != self.open:\n assert 'BAD'\n return cnt\n\n\nif __name__ == '__main__':\n board = SudukoBoard()\n evil = (\n '..1.4..6.,...8...2.,..4..9.3.,.48..76..,5.......9,..25..47.,.8.1..2..,.5...6...,.6..9.1..'\n )\n evil2 = (\n '..9..3.14,....96...,.2....9..,..8.....1,..12784..,6.....7..,..7....4.,...93....,46.8..3..'\n )\n medium = (\n '8.4.7.6.5,....8237.,7......1.,35...8...,....9....,...4...61,.3......7,.9571....,4.6.3.1.2'\n )\n hard = (\n '......1..,7..4.18..,..375..4.,4.1.7....,.9..8..7.,....9.6.5,.6..129..,..45.6..2,..2......'\n )\n easy = (\n 
'.7.4..2..,2..5791..,.4......6,..261.35.,631...427,.54.328..,5......3.,..6157..4,..8..6.1.'\n )\n board.setup(evil2)\n board.print()\n print()\n soln, guesses = board.solve()\n print('Final : guesses', guesses)\n soln.print()\n pass\n",
"step-5": "from collections import Counter\nfrom copy import deepcopy\nfrom itertools import count\nfrom traceback import print_exc\n\n#https://www.websudoku.com/?level=4\n\nclass SudukoBoard:\n side=3\n sz=side*side\n class Cell:\n def __init__(self,board,row,col):\n self._values= [None] * SudukoBoard.sz\n self._value=None\n self.sets=[]\n self.row=row\n self.col=col\n self.open=SudukoBoard.sz\n self.board=board\n\n def add_set(self,set):\n self.sets.append(set)\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self,value):\n if self._value is not None and self._value!=value:\n raise ValueError(\"Conflicting value for cell\",self.row,self.col,self._value,value)\n if self._value != value:\n self._value=value\n self._values=[False]*SudukoBoard.sz\n self._values[value-1]=True\n self.open=0\n self.board.open-=1\n for s in self.sets:\n for c in s.entries:\n if c!=self:\n c.cantbe(value)\n\n def cantbe(self, value):\n if self._values[value - 1] == True:\n raise ValueError(\"Conflicting cant be for cell, already set\",self.row,self.col,self._value,value)\n if self._values[value-1] != False:\n self._values[value-1]=False\n self.open -=1\n cnt=0\n nidx=None\n for idx,v in enumerate(self._values):\n if v is None:\n cnt+=1\n nidx=idx\n if cnt==1:\n self.value=nidx+1\n\n def couldbe(self, value):\n return self._values[value - 1]\n\n def couldbelist(self):\n return [idx+1 for idx,x in enumerate(self._values) if x is None]\n\n class Set:\n def __init__(self):\n self.entries=[]\n\n def add_cell(self,cell):\n self.entries.append(cell)\n cell.add_set(self)\n\n def update(self,entry):\n value=entry.value\n for other in self.entries:\n if other==entry:\n continue\n if other.value == value:\n raise Exception(\"Illegal value\")\n else:\n other.value=not value\n\n def __init__(self):\n self.initial=0\n self.open=SudukoBoard.sz**2\n self.cells=[]\n self.rows=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n self.cols=[SudukoBoard.Set() for i in 
range(SudukoBoard.sz)]\n self.blks=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]\n s3=SudukoBoard.side*SudukoBoard.sz\n for i in range(SudukoBoard.sz**2):\n cell=SudukoBoard.Cell(self,i//SudukoBoard.sz,i%SudukoBoard.sz)\n self.cells.append(cell)\n for cell in self.cells:\n self.rows[cell.row].add_cell(cell)\n self.cols[cell.col].add_cell(cell)\n self.blks[(cell.row)//SudukoBoard.side+((cell.col)//SudukoBoard.side)*SudukoBoard.side].add_cell(cell)\n\n def setup(self,txt):\n trows=txt.split(\",\")\n if len(trows)!=SudukoBoard.sz:\n raise Exception(\"Incorrect number of rows\")\n cnt=0\n for ridx,trow in enumerate(trows):\n if len(trows) != SudukoBoard.sz:\n raise Exception(\"Incorrect number of columns row \",ridx)\n for cidx,c in enumerate(trow):\n if c != '.':\n v=int(c)\n cnt+=1\n self.set(ridx,cidx,v)\n # print(\"Set \",ridx+1,cidx+1, \" tot \",cnt,\" left \",self.open,\n # \" auto \",SudukoBoard.sz**2-self.open-cnt)\n # self.print()\n\n def set(self,row,col,value):\n self.rows[row].entries[col].value=value\n\n def print(self):\n for ridx,r in enumerate(self.rows):\n for cidx,c in enumerate(r.entries):\n print(\".\" if c.value is None else c.value,end='')\n if (cidx+1)%SudukoBoard.side == 0:\n print(\"|\",end='')\n print()\n if (ridx+1)%SudukoBoard.side == 0:\n print(\"{}\".format(\"-\"*(SudukoBoard.sz+SudukoBoard.side)))\n\n def solve(self,depth=0,guesses=[]):\n for i in range(1000):\n print(\"Iteration \",depth,i)\n # for c in self.cells:\n # print(c.row,c.col,c.couldbelist(),c._value,c._values)\n open=[Counter([len(c.couldbelist()) for c in self.cells])]\n print(\"open cells\",open)\n for c in self.cells:\n if c.open!=1:\n continue\n if c.open != len(c.couldbelist()):\n pass\n value=c.couldbelist()\n c.set(value)\n\n if self.open >0 and not 1 in open:\n print(\"We have to guess depth {} and {} cells open\".format(depth,self.open))\n bestguess=[]\n for c in self.cells:\n for guess in c.couldbelist():\n other=deepcopy(self)\n try:\n 
other.set(c.row,c.col,guess)\n bestguess.append((other.open,(c.row,c.col,guess)))\n except ValueError as e:\n pass\n except Exception as e:\n print_exc()\n for open,(row,col,guess) in sorted(bestguess):\n print(\"Best guess \",row,col,guess,depth)\n other = deepcopy(self)\n other.set(row,col,guess)\n soln,soln_guesses = other.solve(depth + 1,guesses+[(row,col,guess)])\n if soln.open == 0:\n print(\"guess return\")\n return soln,soln_guesses\n # if self.open == 0:\n # print(\"Solved with {} guesses {}\".format(depth,guesses))\n # self.print()\n return self,guesses\n\n\n\n def leftopen(self):\n cnt=0\n for c in self.cells:\n if c.value is None:\n cnt+=1\n if cnt != self.open:\n assert \"BAD\"\n return cnt\nif __name__ == \"__main__\":\n board=SudukoBoard()\n evil=\"..1.4..6.,...8...2.,..4..9.3.,.48..76..,5.......9,..25..47.,.8.1..2..,.5...6...,.6..9.1..\"\n evil2=\"..9..3.14,....96...,.2....9..,..8.....1,..12784..,6.....7..,..7....4.,...93....,46.8..3..\"\n medium=\"8.4.7.6.5,....8237.,7......1.,35...8...,....9....,...4...61,.3......7,.9571....,4.6.3.1.2\"\n hard=\"......1..,7..4.18..,..375..4.,4.1.7....,.9..8..7.,....9.6.5,.6..129..,..45.6..2,..2......\"\n easy=\".7.4..2..,2..5791..,.4......6,..261.35.,631...427,.54.328..,5......3.,..6157..4,..8..6.1.\"\n board.setup(evil2)\n board.print()\n print()\n soln,guesses=board.solve()\n print(\"Final : guesses\",guesses)\n soln.print()\n pass",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
"""The prediction classes. Instances of the class are returned by
the recommender.
"""
class RelationshipPrediction(object):
    """The prediction of the predicted_relationship appearing between a
    given subject-object pair.

    Attributes:
        subject: the domain-specific subject.
        object_: the domain-specific object.
        expectancy (float): estimated probability that the
            predicted_relationship occurs between the subject and the object.
        is_uncertain (bool): True when the prediction was made without any
            information available.
        explanation (str): human-readable explanation for the prediction.
    """

    def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):
        """Store the prediction data on the instance."""
        self.subject = subject
        self.object_ = object_
        self.expectancy = expectancy
        self.is_uncertain = is_uncertain
        self.explanation = explanation

    def __unicode__(self):
        # "subject <- object: expectancy, explanation" rendering, built from a
        # tuple of the four displayed fields.
        displayed = (self.subject, self.object_, self.expectancy, self.explanation)
        return u"%s <- %s: %f, %s" % displayed

    def __repr__(self):
        return "< %s >" % str(self.__unicode__())
|
normal
|
{
"blob_id": "c3de9e6129bcafd863cd330ac281345fb563cc8c",
"index": 6259,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n\n def __repr__(self):\n return '< %s >' % str(self.__unicode__())\n",
"step-4": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n\n def __init__(self, subject, object_, expectancy, is_uncertain,\n explanation=''):\n \"\"\"The initializer\"\"\"\n self.subject = subject\n \"\"\"The subject\"\"\"\n self.object_ = object_\n \"\"\"The object\"\"\"\n self.expectancy = expectancy\n \"\"\"The estimated probability of the predicted_relationship\n occuring between the subject and the object.\n \"\"\"\n self.is_uncertain = is_uncertain\n \"\"\"Is the prediction made without having any information available?\"\"\"\n self.explanation = explanation\n \"\"\"The explanation for the prediction\"\"\"\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n\n def __repr__(self):\n return '< %s >' % str(self.__unicode__())\n",
"step-5": "\"\"\"The prediction classes. Instances of the class are returned by \nthe recommender.\n\"\"\"\n\nclass RelationshipPrediction(object):\n \"\"\"The prediction of the predicted_relationship appearing between\n the given subject-object pair.\n \n @type subject: the domain-specific subject\n @ivar subject: the subject \n \n @type object_: the domain-specific object\n @ivar object_: the object\n \n @type expectancy: float\n @ivar expectancy: the estimated probability of the predict_relationship\n occuring between the subject and the object\n \n @type explanation: str\n @ivar explanation: the explanation for the prediction \n \"\"\"\n \n def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):\n \"\"\"The initializer\"\"\"\n \n self.subject = subject\n \"\"\"The subject\"\"\"\n \n self.object_ = object_\n \"\"\"The object\"\"\"\n \n self.expectancy = expectancy\n \"\"\"The estimated probability of the predicted_relationship\n occuring between the subject and the object.\n \"\"\"\n \n self.is_uncertain = is_uncertain\n \"\"\"Is the prediction made without having any information available?\"\"\"\n \n self.explanation = explanation\n \"\"\"The explanation for the prediction\"\"\"\n\n def __unicode__(self):\n return u\"%s <- %s: %f, %s\" % (\n self.subject, \n self.object_, \n self.expectancy, \n self.explanation\n )\n \n def __repr__(self):\n return \"< %s >\" % str(self.__unicode__())\n",
"step-ids": [
0,
2,
3,
4,
6
]
}
|
[
0,
2,
3,
4,
6
] |
#8
def matrix(m):
    """Square every element of the 2-D list `m` in place.

    Args:
        m: a list of lists of numbers; rows are modified in place.

    Returns:
        None (mutates `m`).

    Generalization: each row is traversed over its own length (the original
    indexed every row by len(m[0])), so ragged inputs now work; behavior for
    rectangular matrices is unchanged.
    """
    for row in m:
        for j, entry in enumerate(row):
            row[j] = entry ** 2
# Demo: square a sample matrix in place and show it before and after.
sample = [[1, 2, 3], [4, 5, 6], [8, 9, 0]]
print('The matrix is ', sample)
matrix(sample)
print('The updated matrix is ', sample)
|
normal
|
{
"blob_id": "f46dd5217c8e015546d7fff7ee52569ecc2c8e41",
"index": 5487,
"step-1": "<mask token>\n",
"step-2": "def matrix(m):\n for i in range(len(m)):\n for j in range(len(m[0])):\n m[i][j] = m[i][j] ** 2\n\n\n<mask token>\n",
"step-3": "def matrix(m):\n for i in range(len(m)):\n for j in range(len(m[0])):\n m[i][j] = m[i][j] ** 2\n\n\n<mask token>\nprint('The matrix is ', a)\nmatrix(a)\nprint('The updated matrix is ', a)\n",
"step-4": "def matrix(m):\n for i in range(len(m)):\n for j in range(len(m[0])):\n m[i][j] = m[i][j] ** 2\n\n\na = [[1, 2, 3], [4, 5, 6], [8, 9, 0]]\nprint('The matrix is ', a)\nmatrix(a)\nprint('The updated matrix is ', a)\n",
"step-5": "#8\ndef matrix(m):\n for i in range(len(m)):\n for j in range (len(m[0])):\n m[i][j]=(m[i][j])**2 \n\na=[[1,2,3],[4,5,6],[8,9,0]]\nprint('The matrix is ',a)\nmatrix(a)\nprint('The updated matrix is ',a)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
plastiqpublicapi
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
import json
import dateutil.parser
from tests.controllers.controller_test_base import ControllerTestBase
from tests.test_helper import TestHelper
from tests.http_response_catcher import HttpResponseCatcher
from plastiqpublicapi.api_helper import APIHelper
from plastiqpublicapi.controllers.categories_controller import CategoriesController
class CategoriesControllerTests(ControllerTestBase):
    """Integration tests for CategoriesController (auto-generated SDK tests)."""

    @classmethod
    def setUpClass(cls):
        """Create the shared response catcher and controller once per class."""
        super(CategoriesControllerTests, cls).setUpClass()
        cls.response_catcher = HttpResponseCatcher()
        cls.controller = CategoriesController(cls.config, cls.response_catcher)

    # Retrieve a paginated list of Categories by query parameter(s)
    def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self):
        # Perform the API call through the SDK function
        result = self.controller.retrieve_a_paginated_list_of_categories_by_query_parameter_s()

        # Test response code.
        # Bug fix: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(self.response_catcher.response.status_code, 200)

        # Test headers (None value means "header must be present, any value")
        expected_headers = {}
        expected_headers['trace-id'] = None
        expected_headers['content-type'] = 'application/json'
        self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))
|
normal
|
{
"blob_id": "a4f2418e746cc43bd407b6a212de9802044351e1",
"index": 3928,
"step-1": "<mask token>\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n\n @classmethod\n def setUpClass(cls):\n super(CategoriesControllerTests, cls).setUpClass()\n cls.response_catcher = HttpResponseCatcher()\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n\n @classmethod\n def setUpClass(cls):\n super(CategoriesControllerTests, cls).setUpClass()\n cls.response_catcher = HttpResponseCatcher()\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\n\n def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self\n ):\n result = (self.controller.\n retrieve_a_paginated_list_of_categories_by_query_parameter_s())\n self.assertEquals(self.response_catcher.response.status_code, 200)\n expected_headers = {}\n expected_headers['trace-id'] = None\n expected_headers['content-type'] = 'application/json'\n self.assertTrue(TestHelper.match_headers(expected_headers, self.\n response_catcher.response.headers))\n",
"step-4": "<mask token>\nimport json\nimport dateutil.parser\nfrom tests.controllers.controller_test_base import ControllerTestBase\nfrom tests.test_helper import TestHelper\nfrom tests.http_response_catcher import HttpResponseCatcher\nfrom plastiqpublicapi.api_helper import APIHelper\nfrom plastiqpublicapi.controllers.categories_controller import CategoriesController\n\n\nclass CategoriesControllerTests(ControllerTestBase):\n\n @classmethod\n def setUpClass(cls):\n super(CategoriesControllerTests, cls).setUpClass()\n cls.response_catcher = HttpResponseCatcher()\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\n\n def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self\n ):\n result = (self.controller.\n retrieve_a_paginated_list_of_categories_by_query_parameter_s())\n self.assertEquals(self.response_catcher.response.status_code, 200)\n expected_headers = {}\n expected_headers['trace-id'] = None\n expected_headers['content-type'] = 'application/json'\n self.assertTrue(TestHelper.match_headers(expected_headers, self.\n response_catcher.response.headers))\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nplastiqpublicapi\r\n\r\nThis file was automatically generated by APIMATIC v3.0 (\r\n https://www.apimatic.io ).\r\n\"\"\"\r\n\r\nimport json\r\nimport dateutil.parser\r\n\r\nfrom tests.controllers.controller_test_base import ControllerTestBase\r\nfrom tests.test_helper import TestHelper\r\nfrom tests.http_response_catcher import HttpResponseCatcher\r\nfrom plastiqpublicapi.api_helper import APIHelper\r\nfrom plastiqpublicapi.controllers.categories_controller import CategoriesController\r\n\r\n\r\nclass CategoriesControllerTests(ControllerTestBase):\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n super(CategoriesControllerTests, cls).setUpClass()\r\n cls.response_catcher = HttpResponseCatcher()\r\n cls.controller = CategoriesController(cls.config, cls.response_catcher)\r\n\r\n # Retrieve a paginated list of Categories by query parameter(s)\r\n def test_retrieve_a_paginated_list_of_categories_by_query_parameter_s(self):\r\n\r\n # Perform the API call through the SDK function\r\n result = self.controller.retrieve_a_paginated_list_of_categories_by_query_parameter_s()\r\n\r\n # Test response code\r\n self.assertEquals(self.response_catcher.response.status_code, 200)\r\n\r\n # Test headers\r\n expected_headers = {}\r\n expected_headers['trace-id'] = None\r\n expected_headers['content-type'] = 'application/json'\r\n\r\n self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S', format
='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
<|reserved_special_token_0|>
@cron_wait
async def verify_error_proxy_task():
logger.info('run verify_error_proxy_task')
s = sess_maker()
c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()
s.close()
if c < VERIFY_ERROR_LIMIT:
await verify_error_proxy()
s = sess_maker()
c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()
if c > MAX_ERROR_PROXIES:
res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc
(Proxy.updated_at)).limit(c - MAX_ERROR_PROXIES).from_self().all()
[s.delete(i) for i in res]
s.commit()
@cron_wait
async def update_squid_task():
logger.info('run update_squid_task')
s = sess_maker()
proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()
s.close()
squid.update_conf(proxies)
@cron_wait
async def verify_ok_proxy_task():
logger.info('run verify_ok_proxy_task')
await verifier.verify_ok_proxy()
await verify_error_proxy_task()
await update_squid_task()
@cron_wait
async def fetch_new_proxy_task():
logger.info('run fetch_new_proxy_task')
await spider.run_spider()
await verifier.verify_new_proxy()
await update_squid_task()
if __name__ == '__main__':
logger.info('start')
loop = asyncio.get_event_loop()
loop.run_until_complete(update_squid_task())
msh = Scheduler()
msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))
msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))
try:
loop.run_until_complete(asyncio.wait([msh.start(), run_api_server()]))
loop.run_forever()
except KeyboardInterrupt:
print('exit')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S', format
='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
VERIFY_ERROR_LIMIT = int(os.getenv('VERIFY_ERROR_LIMIT', 100))
MAX_ERROR_PROXIES = int(os.getenv('MAX_ERROR_PROXIES', 2048))
@cron_wait
async def verify_error_proxy_task():
logger.info('run verify_error_proxy_task')
s = sess_maker()
c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()
s.close()
if c < VERIFY_ERROR_LIMIT:
await verify_error_proxy()
s = sess_maker()
c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()
if c > MAX_ERROR_PROXIES:
res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc
(Proxy.updated_at)).limit(c - MAX_ERROR_PROXIES).from_self().all()
[s.delete(i) for i in res]
s.commit()
@cron_wait
async def update_squid_task():
logger.info('run update_squid_task')
s = sess_maker()
proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()
s.close()
squid.update_conf(proxies)
@cron_wait
async def verify_ok_proxy_task():
logger.info('run verify_ok_proxy_task')
await verifier.verify_ok_proxy()
await verify_error_proxy_task()
await update_squid_task()
@cron_wait
async def fetch_new_proxy_task():
logger.info('run fetch_new_proxy_task')
await spider.run_spider()
await verifier.verify_new_proxy()
await update_squid_task()
if __name__ == '__main__':
logger.info('start')
loop = asyncio.get_event_loop()
loop.run_until_complete(update_squid_task())
msh = Scheduler()
msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))
msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))
try:
loop.run_until_complete(asyncio.wait([msh.start(), run_api_server()]))
loop.run_forever()
except KeyboardInterrupt:
print('exit')
<|reserved_special_token_1|>
import asyncio
import logging
import os
from async_cron.job import CronJob
from async_cron.schedule import Scheduler
from sqlalchemy import asc
import spider
import squid
import verifier
from db import sess_maker
from model import Proxy, STATUS_OK, STATUS_ERROR
from server import run_api_server
from tool import logger, cron_wait
from verifier import verify_error_proxy
logging.basicConfig(level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S', format
='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
VERIFY_ERROR_LIMIT = int(os.getenv('VERIFY_ERROR_LIMIT', 100))
MAX_ERROR_PROXIES = int(os.getenv('MAX_ERROR_PROXIES', 2048))
@cron_wait
async def verify_error_proxy_task():
logger.info('run verify_error_proxy_task')
s = sess_maker()
c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()
s.close()
if c < VERIFY_ERROR_LIMIT:
await verify_error_proxy()
s = sess_maker()
c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()
if c > MAX_ERROR_PROXIES:
res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc
(Proxy.updated_at)).limit(c - MAX_ERROR_PROXIES).from_self().all()
[s.delete(i) for i in res]
s.commit()
@cron_wait
async def update_squid_task():
logger.info('run update_squid_task')
s = sess_maker()
proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()
s.close()
squid.update_conf(proxies)
@cron_wait
async def verify_ok_proxy_task():
logger.info('run verify_ok_proxy_task')
await verifier.verify_ok_proxy()
await verify_error_proxy_task()
await update_squid_task()
@cron_wait
async def fetch_new_proxy_task():
logger.info('run fetch_new_proxy_task')
await spider.run_spider()
await verifier.verify_new_proxy()
await update_squid_task()
if __name__ == '__main__':
logger.info('start')
loop = asyncio.get_event_loop()
loop.run_until_complete(update_squid_task())
msh = Scheduler()
msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))
msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))
try:
loop.run_until_complete(asyncio.wait([msh.start(), run_api_server()]))
loop.run_forever()
except KeyboardInterrupt:
print('exit')
<|reserved_special_token_1|>
import asyncio
import logging
import os
from async_cron.job import CronJob
from async_cron.schedule import Scheduler
from sqlalchemy import asc
import spider
import squid
import verifier
from db import sess_maker
from model import Proxy, STATUS_OK, STATUS_ERROR
from server import run_api_server
from tool import logger, cron_wait
from verifier import verify_error_proxy
# Root-logger setup: INFO level with timestamped "name - level - message" lines.
logging.basicConfig(
    level=logging.INFO,
    datefmt='%Y/%m/%d %H:%M:%S',
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Re-verify errored proxies only while fewer than this many proxies are OK.
VERIFY_ERROR_LIMIT = int(os.getenv("VERIFY_ERROR_LIMIT", 100))
# Cap on stored errored proxies; the oldest beyond this cap are deleted.
MAX_ERROR_PROXIES = int(os.getenv("MAX_ERROR_PROXIES", 2048))
@cron_wait
async def verify_error_proxy_task():
    """Re-check errored proxies when healthy ones run low, then prune old ones.

    If fewer than ``VERIFY_ERROR_LIMIT`` proxies are currently OK, re-verify
    the errored pool via ``verify_error_proxy()``.  Afterwards, if more than
    ``MAX_ERROR_PROXIES`` errored rows remain, delete the oldest rows
    (ordered by ``updated_at``) so the table stays bounded.
    """
    logger.info("run verify_error_proxy_task")
    s = sess_maker()
    c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()
    s.close()
    if c < VERIFY_ERROR_LIMIT:
        await verify_error_proxy()
    # Prune: keep at most MAX_ERROR_PROXIES errored rows, dropping the oldest.
    # NOTE(review): this second session is not closed after commit — confirm intended.
    s = sess_maker()
    c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()
    if c > MAX_ERROR_PROXIES:
        res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc(Proxy.updated_at)).limit(
            c - MAX_ERROR_PROXIES).from_self().all()
        [s.delete(i) for i in res]
        s.commit()
@cron_wait
async def update_squid_task():
    """Regenerate the squid proxy configuration from all currently-OK proxies."""
    logger.info("run update_squid_task")
    s = sess_maker()
    proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()
    s.close()
    squid.update_conf(proxies)
@cron_wait
async def verify_ok_proxy_task():
    """Periodic job: re-verify OK proxies, recheck errored ones, refresh squid."""
    logger.info("run verify_ok_proxy_task")
    await verifier.verify_ok_proxy()
    await verify_error_proxy_task()
    await update_squid_task()
@cron_wait
async def fetch_new_proxy_task():
    """Periodic job: crawl for new proxies, verify them, refresh squid config."""
    logger.info("run fetch_new_proxy_task")
    await spider.run_spider()
    await verifier.verify_new_proxy()
    # await verify_error_proxy_task()
    await update_squid_task()
if __name__ == '__main__':
    logger.info("start")
    loop = asyncio.get_event_loop()
    # Build an initial squid config before the cron scheduler starts.
    loop.run_until_complete(update_squid_task())
    # Schedule the recurring jobs: verification every 10 min, crawling every 30 min.
    msh = Scheduler()
    msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))
    msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))
    try:
        # Run the cron scheduler and the HTTP API server concurrently.
        loop.run_until_complete(asyncio.wait([
            msh.start(),
            run_api_server(),
        ]))
        loop.run_forever()
    except KeyboardInterrupt:
        print('exit')
|
flexible
|
{
"blob_id": "1d529e2ea5526ddcda0d0da30ed8ed4724002c63",
"index": 7074,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S', format\n ='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n<mask token>\n\n\n@cron_wait\nasync def verify_error_proxy_task():\n logger.info('run verify_error_proxy_task')\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()\n s.close()\n if c < VERIFY_ERROR_LIMIT:\n await verify_error_proxy()\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()\n if c > MAX_ERROR_PROXIES:\n res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc\n (Proxy.updated_at)).limit(c - MAX_ERROR_PROXIES).from_self().all()\n [s.delete(i) for i in res]\n s.commit()\n\n\n@cron_wait\nasync def update_squid_task():\n logger.info('run update_squid_task')\n s = sess_maker()\n proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()\n s.close()\n squid.update_conf(proxies)\n\n\n@cron_wait\nasync def verify_ok_proxy_task():\n logger.info('run verify_ok_proxy_task')\n await verifier.verify_ok_proxy()\n await verify_error_proxy_task()\n await update_squid_task()\n\n\n@cron_wait\nasync def fetch_new_proxy_task():\n logger.info('run fetch_new_proxy_task')\n await spider.run_spider()\n await verifier.verify_new_proxy()\n await update_squid_task()\n\n\nif __name__ == '__main__':\n logger.info('start')\n loop = asyncio.get_event_loop()\n loop.run_until_complete(update_squid_task())\n msh = Scheduler()\n msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))\n msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))\n try:\n loop.run_until_complete(asyncio.wait([msh.start(), run_api_server()]))\n loop.run_forever()\n except KeyboardInterrupt:\n print('exit')\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S', format\n ='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nVERIFY_ERROR_LIMIT = int(os.getenv('VERIFY_ERROR_LIMIT', 100))\nMAX_ERROR_PROXIES = int(os.getenv('MAX_ERROR_PROXIES', 2048))\n\n\n@cron_wait\nasync def verify_error_proxy_task():\n logger.info('run verify_error_proxy_task')\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()\n s.close()\n if c < VERIFY_ERROR_LIMIT:\n await verify_error_proxy()\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()\n if c > MAX_ERROR_PROXIES:\n res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc\n (Proxy.updated_at)).limit(c - MAX_ERROR_PROXIES).from_self().all()\n [s.delete(i) for i in res]\n s.commit()\n\n\n@cron_wait\nasync def update_squid_task():\n logger.info('run update_squid_task')\n s = sess_maker()\n proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()\n s.close()\n squid.update_conf(proxies)\n\n\n@cron_wait\nasync def verify_ok_proxy_task():\n logger.info('run verify_ok_proxy_task')\n await verifier.verify_ok_proxy()\n await verify_error_proxy_task()\n await update_squid_task()\n\n\n@cron_wait\nasync def fetch_new_proxy_task():\n logger.info('run fetch_new_proxy_task')\n await spider.run_spider()\n await verifier.verify_new_proxy()\n await update_squid_task()\n\n\nif __name__ == '__main__':\n logger.info('start')\n loop = asyncio.get_event_loop()\n loop.run_until_complete(update_squid_task())\n msh = Scheduler()\n msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))\n msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))\n try:\n loop.run_until_complete(asyncio.wait([msh.start(), run_api_server()]))\n loop.run_forever()\n except KeyboardInterrupt:\n print('exit')\n",
"step-4": "import asyncio\nimport logging\nimport os\nfrom async_cron.job import CronJob\nfrom async_cron.schedule import Scheduler\nfrom sqlalchemy import asc\nimport spider\nimport squid\nimport verifier\nfrom db import sess_maker\nfrom model import Proxy, STATUS_OK, STATUS_ERROR\nfrom server import run_api_server\nfrom tool import logger, cron_wait\nfrom verifier import verify_error_proxy\nlogging.basicConfig(level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S', format\n ='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nVERIFY_ERROR_LIMIT = int(os.getenv('VERIFY_ERROR_LIMIT', 100))\nMAX_ERROR_PROXIES = int(os.getenv('MAX_ERROR_PROXIES', 2048))\n\n\n@cron_wait\nasync def verify_error_proxy_task():\n logger.info('run verify_error_proxy_task')\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()\n s.close()\n if c < VERIFY_ERROR_LIMIT:\n await verify_error_proxy()\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()\n if c > MAX_ERROR_PROXIES:\n res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc\n (Proxy.updated_at)).limit(c - MAX_ERROR_PROXIES).from_self().all()\n [s.delete(i) for i in res]\n s.commit()\n\n\n@cron_wait\nasync def update_squid_task():\n logger.info('run update_squid_task')\n s = sess_maker()\n proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()\n s.close()\n squid.update_conf(proxies)\n\n\n@cron_wait\nasync def verify_ok_proxy_task():\n logger.info('run verify_ok_proxy_task')\n await verifier.verify_ok_proxy()\n await verify_error_proxy_task()\n await update_squid_task()\n\n\n@cron_wait\nasync def fetch_new_proxy_task():\n logger.info('run fetch_new_proxy_task')\n await spider.run_spider()\n await verifier.verify_new_proxy()\n await update_squid_task()\n\n\nif __name__ == '__main__':\n logger.info('start')\n loop = asyncio.get_event_loop()\n loop.run_until_complete(update_squid_task())\n msh = Scheduler()\n 
msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))\n msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))\n try:\n loop.run_until_complete(asyncio.wait([msh.start(), run_api_server()]))\n loop.run_forever()\n except KeyboardInterrupt:\n print('exit')\n",
"step-5": "import asyncio\nimport logging\nimport os\n\nfrom async_cron.job import CronJob\nfrom async_cron.schedule import Scheduler\nfrom sqlalchemy import asc\n\nimport spider\nimport squid\nimport verifier\nfrom db import sess_maker\nfrom model import Proxy, STATUS_OK, STATUS_ERROR\nfrom server import run_api_server\nfrom tool import logger, cron_wait\nfrom verifier import verify_error_proxy\n\nlogging.basicConfig(\n level=logging.INFO,\n datefmt='%Y/%m/%d %H:%M:%S',\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\n\nVERIFY_ERROR_LIMIT = int(os.getenv(\"VERIFY_ERROR_LIMIT\", 100))\nMAX_ERROR_PROXIES = int(os.getenv(\"MAX_ERROR_PROXIES\", 2048))\n\n\n@cron_wait\nasync def verify_error_proxy_task():\n logger.info(\"run verify_error_proxy_task\")\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_OK).count()\n s.close()\n if c < VERIFY_ERROR_LIMIT:\n await verify_error_proxy()\n\n s = sess_maker()\n c = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).count()\n if c > MAX_ERROR_PROXIES:\n res = s.query(Proxy).filter(Proxy.status == STATUS_ERROR).order_by(asc(Proxy.updated_at)).limit(\n c - MAX_ERROR_PROXIES).from_self().all()\n [s.delete(i) for i in res]\n s.commit()\n\n\n@cron_wait\nasync def update_squid_task():\n logger.info(\"run update_squid_task\")\n s = sess_maker()\n proxies = s.query(Proxy).filter(Proxy.status == STATUS_OK).all()\n s.close()\n squid.update_conf(proxies)\n\n\n@cron_wait\nasync def verify_ok_proxy_task():\n logger.info(\"run verify_ok_proxy_task\")\n await verifier.verify_ok_proxy()\n await verify_error_proxy_task()\n await update_squid_task()\n\n\n@cron_wait\nasync def fetch_new_proxy_task():\n logger.info(\"run fetch_new_proxy_task\")\n await spider.run_spider()\n await verifier.verify_new_proxy()\n # await verify_error_proxy_task()\n await update_squid_task()\n\n\nif __name__ == '__main__':\n logger.info(\"start\")\n\n loop = asyncio.get_event_loop()\n 
loop.run_until_complete(update_squid_task())\n\n msh = Scheduler()\n msh.add_job(CronJob().every(10).minute.go(verify_ok_proxy_task))\n msh.add_job(CronJob().every(30).minute.go(fetch_new_proxy_task))\n try:\n loop.run_until_complete(asyncio.wait([\n msh.start(),\n run_api_server(),\n ]))\n loop.run_forever()\n except KeyboardInterrupt:\n print('exit')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# operatorTest02.py
# Demonstrates Python's augmented (compound) assignment operators.
x = 5
x += 3  # compound assignment operator: same as x = x + 3
print("x : ", x)
print("-"*30)
total = 0
total += 1
total  # bare expression statement: evaluates to 1 but has no effect in a script
|
normal
|
{
"blob_id": "4f8bc19bb113c9eac7c2ac774ac7b16f569d9704",
"index": 3083,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nx += 3\nprint('x : ', x)\nprint('-' * 30)\n<mask token>\ntotal += 1\ntotal\n",
"step-3": "x = 5\nx += 3\nprint('x : ', x)\nprint('-' * 30)\ntotal = 0\ntotal += 1\ntotal\n",
"step-4": "# operatorTest02.py\n\nx = 5\nx += 3 #복함 대입 연산자\nprint(\"x : \", x)\nprint(\"-\"*30)\n\ntotal = 0\ntotal += 1\ntotal ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(f'Selamat datang di Game Tebak angka')
while nyawa > limit:
print(f'Percobaan anda tersisa {nyawa}')
jawaban = int(input('Masukkan angka 0-10 = '))
if jawaban == angka_rahasia:
print('Anda Benar')
break
elif nyawa - 1 == limit:
print('Anda Gagal')
break
elif jawaban > angka_rahasia:
print('Lebih')
nyawa -= 1
elif jawaban < angka_rahasia:
print('Kurang')
nyawa -= 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
nyawa = 3
angka_rahasia = randint(0, 10)
limit = 0
print(f'Selamat datang di Game Tebak angka')
while nyawa > limit:
print(f'Percobaan anda tersisa {nyawa}')
jawaban = int(input('Masukkan angka 0-10 = '))
if jawaban == angka_rahasia:
print('Anda Benar')
break
elif nyawa - 1 == limit:
print('Anda Gagal')
break
elif jawaban > angka_rahasia:
print('Lebih')
nyawa -= 1
elif jawaban < angka_rahasia:
print('Kurang')
nyawa -= 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from random import randint
nyawa = 3
angka_rahasia = randint(0, 10)
limit = 0
print(f'Selamat datang di Game Tebak angka')
while nyawa > limit:
print(f'Percobaan anda tersisa {nyawa}')
jawaban = int(input('Masukkan angka 0-10 = '))
if jawaban == angka_rahasia:
print('Anda Benar')
break
elif nyawa - 1 == limit:
print('Anda Gagal')
break
elif jawaban > angka_rahasia:
print('Lebih')
nyawa -= 1
elif jawaban < angka_rahasia:
print('Kurang')
nyawa -= 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Number-guessing game ("Tebak Angka")
from random import randint
nyawa = 3  # remaining attempts ("lives")
angka_rahasia = randint(0,10)  # secret number to guess, 0..10 inclusive
limit = 0  # the loop stops once the attempt counter reaches this floor
print(f"Selamat datang di Game Tebak angka")
while nyawa > limit:
    print(f"Percobaan anda tersisa {nyawa}")
    jawaban = int(input("Masukkan angka 0-10 = "))
    if jawaban == angka_rahasia:
        # Correct guess: the player wins and the game ends.
        print ("Anda Benar")
        break
    elif nyawa-1 == limit:
        # Last attempt used up without a correct guess: the player loses.
        print ("Anda Gagal")
        break
    elif jawaban > angka_rahasia:
        # Hint: the guess was too high ("Lebih").
        print("Lebih")
        nyawa -= 1
    elif jawaban < angka_rahasia:
        # Hint: the guess was too low ("Kurang").
        print("Kurang")
        nyawa -= 1
"""# Game Tebak Angka
from random import randint
# Mengimpor Library Random untuk membuat angka rahasia secara acak
nyawa = 3 # Jumlah percobaan yang di berikan
angka_rahasia = randint(0,10) # Angka rahasia sebagai jawaban game
limit = 0 # Batas nyawa jika nyawa jadi 0 maka pemain akan gagal
print(f"Selamat datang di Game Tebak angka")
while nyawa > limit:
# ini menandakan bahwa game akan berjalan
# jika nyawa lebih besar dari limit
print(f"Percobaan anda tersisa {nyawa}")
# ini untuk memberitahukan pemain jumlah nyawa yang mereka miliki
jawaban = int(input("Masukkan angka 0-10 = "))
# ini untuk menerima angka tebakan dari pemain
if jawaban == angka_rahasia:
print ("Anda Benar")
break
# ini untuk memeriksa apakah angka yang
# di masukan pemain sama dengan angka rahasia
elif nyawa-1 == limit:
print ("Anda Gagal")
break
# Jika jawabannya salah maka nyawanya akan di periksa di sini jika
# nyawanya sudah mencapai limit maka game nya akan selesai
# dan pemain akan kalah
elif jawaban > angka_rahasia:
print("Lebih")
nyawa -= 1
elif jawaban < angka_rahasia:
print("Kurang")
nyawa -= 1
# ini untuk memberikan bantuan kepada pemain apakah angka yang di masukkan
# itu lebih besar atau kurang dari angka rahasia
"""
|
flexible
|
{
"blob_id": "d4b01b015723950a4d8c3453d736cd64f306d27b",
"index": 2940,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'Selamat datang di Game Tebak angka')\nwhile nyawa > limit:\n print(f'Percobaan anda tersisa {nyawa}')\n jawaban = int(input('Masukkan angka 0-10 = '))\n if jawaban == angka_rahasia:\n print('Anda Benar')\n break\n elif nyawa - 1 == limit:\n print('Anda Gagal')\n break\n elif jawaban > angka_rahasia:\n print('Lebih')\n nyawa -= 1\n elif jawaban < angka_rahasia:\n print('Kurang')\n nyawa -= 1\n<mask token>\n",
"step-3": "<mask token>\nnyawa = 3\nangka_rahasia = randint(0, 10)\nlimit = 0\nprint(f'Selamat datang di Game Tebak angka')\nwhile nyawa > limit:\n print(f'Percobaan anda tersisa {nyawa}')\n jawaban = int(input('Masukkan angka 0-10 = '))\n if jawaban == angka_rahasia:\n print('Anda Benar')\n break\n elif nyawa - 1 == limit:\n print('Anda Gagal')\n break\n elif jawaban > angka_rahasia:\n print('Lebih')\n nyawa -= 1\n elif jawaban < angka_rahasia:\n print('Kurang')\n nyawa -= 1\n<mask token>\n",
"step-4": "from random import randint\nnyawa = 3\nangka_rahasia = randint(0, 10)\nlimit = 0\nprint(f'Selamat datang di Game Tebak angka')\nwhile nyawa > limit:\n print(f'Percobaan anda tersisa {nyawa}')\n jawaban = int(input('Masukkan angka 0-10 = '))\n if jawaban == angka_rahasia:\n print('Anda Benar')\n break\n elif nyawa - 1 == limit:\n print('Anda Gagal')\n break\n elif jawaban > angka_rahasia:\n print('Lebih')\n nyawa -= 1\n elif jawaban < angka_rahasia:\n print('Kurang')\n nyawa -= 1\n<mask token>\n",
"step-5": "# Game Tebak Angka \r\n\r\nfrom random import randint \r\n\r\nnyawa = 3 \r\nangka_rahasia = randint(0,10) \r\nlimit = 0 \r\n\r\nprint(f\"Selamat datang di Game Tebak angka\")\r\nwhile nyawa > limit: \r\n print(f\"Percobaan anda tersisa {nyawa}\")\r\n jawaban = int(input(\"Masukkan angka 0-10 = \"))\r\n if jawaban == angka_rahasia:\r\n print (\"Anda Benar\")\r\n break \r\n elif nyawa-1 == limit:\r\n print (\"Anda Gagal\")\r\n break\r\n elif jawaban > angka_rahasia:\r\n print(\"Lebih\")\r\n nyawa -= 1\r\n elif jawaban < angka_rahasia:\r\n print(\"Kurang\")\r\n nyawa -= 1\r\n\r\n\r\n\r\n\r\n\"\"\"# Game Tebak Angka \r\nfrom random import randint \r\n# Mengimpor Library Random untuk membuat angka rahasia secara acak\r\nnyawa = 3 # Jumlah percobaan yang di berikan\r\nangka_rahasia = randint(0,10) # Angka rahasia sebagai jawaban game\r\nlimit = 0 # Batas nyawa jika nyawa jadi 0 maka pemain akan gagal\r\n\r\nprint(f\"Selamat datang di Game Tebak angka\")\r\nwhile nyawa > limit: \r\n# ini menandakan bahwa game akan berjalan \r\n# jika nyawa lebih besar dari limit\r\n print(f\"Percobaan anda tersisa {nyawa}\")\r\n# ini untuk memberitahukan pemain jumlah nyawa yang mereka miliki \r\n jawaban = int(input(\"Masukkan angka 0-10 = \"))\r\n# ini untuk menerima angka tebakan dari pemain\r\n if jawaban == angka_rahasia:\r\n print (\"Anda Benar\")\r\n break\r\n# ini untuk memeriksa apakah angka yang \r\n# di masukan pemain sama dengan angka rahasia \r\n elif nyawa-1 == limit:\r\n print (\"Anda Gagal\")\r\n break\r\n# Jika jawabannya salah maka nyawanya akan di periksa di sini jika \r\n# nyawanya sudah mencapai limit maka game nya akan selesai \r\n# dan pemain akan kalah\r\n elif jawaban > angka_rahasia:\r\n print(\"Lebih\")\r\n nyawa -= 1\r\n elif jawaban < angka_rahasia:\r\n print(\"Kurang\")\r\n nyawa -= 1\r\n# ini untuk memberikan bantuan kepada pemain apakah angka yang di masukkan\r\n# itu lebih besar atau kurang dari angka rahasia\r\n\"\"\"\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
users = {
    'Students': [
        {'first_name': 'Michael', 'last_name': 'Jordan'},
        {'first_name': 'John', 'last_name': 'Rosales'},
        {'first_name': 'Mark', 'last_name': 'Guillen'},
        {'first_name': 'KB', 'last_name': 'Tonel'}
    ],
    'Instructors': [
        {'first_name': 'Michael', 'last_name': 'Choi'},
        {'first_name': 'Martin', 'last_name': 'Puryear'}
    ]
}


def findUsers():
    """Print every user as ``<n> - <first> <last>  -  <name length>``.

    Fixes over the original version:
    - Python 3 compatible: ``dict.values()`` is a view and cannot be
      subscripted, so groups and users are iterated directly; ``print``
      is called as a function.
    - ``num`` is now actually incremented (it was printed as 1 forever).
    - removed the unused ``something`` variable.
    """
    num = 1
    for group in users.values():
        for user in group:
            full_name = "- " + user['first_name'] + " " + user['last_name']
            # name length counts first+last characters, excluding the space
            print(num, full_name, " - ",
                  len(user['first_name'] + user['last_name']))
            num += 1


findUsers()
|
normal
|
{
"blob_id": "c0f4f9eef12d99d286f5ad56f6554c5910b7cc71",
"index": 8356,
"step-1": "users = {\n 'Students': [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'},\n {'first_name' : 'Mark', 'last_name' : 'Guillen'},\n {'first_name' : 'KB', 'last_name' : 'Tonel'}\n ],\n 'Instructors': [\n {'first_name' : 'Michael', 'last_name' : 'Choi'},\n {'first_name' : 'Martin', 'last_name' : 'Puryear'}\n ]\n }\n\ndef findUsers():\n num = 1\n something = 0\n for i in range(len(users)):\n for idx in range(len(users.values()[i])):\n string = \"\"\n string += \"- \" + users.values()[i][idx]['first_name'] + \" \" + users.values()[i][idx]['last_name']\n print num, string, \" - \", len(users.values()[i][idx]['first_name'] + users.values()[i][idx]['last_name'])\n\n\n\nfindUsers()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class BaseEncoder(nn.Module):
<|reserved_special_token_0|>
def __init__(self, **kwargs):
if len(kwargs) > 0:
raise RuntimeError('Unrecognized options: {}'.format(', '.join(
kwargs.keys())))
super(BaseEncoder, self).__init__()
<|reserved_special_token_0|>
def get_parameters_for_optimizer(self):
return self.parameters()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseEncoder(nn.Module):
<|reserved_special_token_0|>
def __init__(self, **kwargs):
if len(kwargs) > 0:
raise RuntimeError('Unrecognized options: {}'.format(', '.join(
kwargs.keys())))
super(BaseEncoder, self).__init__()
@abstractmethod
def forward(self, features, features_lengths, spkids):
""" Encode a minibatch of audio features
:param features: float32 tensor of size (bs x t x f x c)
:param features_lengths: int64 tensor of size (bs)
:param spkids: string id of speakers
:returns: A tuple with elements:
- encoded: float32 tensor of size (t x bs x d)
- encoded_lens: int64 tensor of size (bs)
"""
pass
def get_parameters_for_optimizer(self):
return self.parameters()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseEncoder(nn.Module):
__metaclass__ = ABCMeta
def __init__(self, **kwargs):
if len(kwargs) > 0:
raise RuntimeError('Unrecognized options: {}'.format(', '.join(
kwargs.keys())))
super(BaseEncoder, self).__init__()
@abstractmethod
def forward(self, features, features_lengths, spkids):
""" Encode a minibatch of audio features
:param features: float32 tensor of size (bs x t x f x c)
:param features_lengths: int64 tensor of size (bs)
:param spkids: string id of speakers
:returns: A tuple with elements:
- encoded: float32 tensor of size (t x bs x d)
- encoded_lens: int64 tensor of size (bs)
"""
pass
def get_parameters_for_optimizer(self):
return self.parameters()
<|reserved_special_token_1|>
from torch import nn
from abc import ABCMeta, abstractmethod
class BaseEncoder(nn.Module):
__metaclass__ = ABCMeta
def __init__(self, **kwargs):
if len(kwargs) > 0:
raise RuntimeError('Unrecognized options: {}'.format(', '.join(
kwargs.keys())))
super(BaseEncoder, self).__init__()
@abstractmethod
def forward(self, features, features_lengths, spkids):
""" Encode a minibatch of audio features
:param features: float32 tensor of size (bs x t x f x c)
:param features_lengths: int64 tensor of size (bs)
:param spkids: string id of speakers
:returns: A tuple with elements:
- encoded: float32 tensor of size (t x bs x d)
- encoded_lens: int64 tensor of size (bs)
"""
pass
def get_parameters_for_optimizer(self):
return self.parameters()
<|reserved_special_token_1|>
from torch import nn
from abc import ABCMeta, abstractmethod
class BaseEncoder(nn.Module, metaclass=ABCMeta):
    """Abstract base class for audio feature encoders.

    Fix: the original used the Python-2-only ``__metaclass__ = ABCMeta``
    attribute, which is inert under Python 3, so ``@abstractmethod`` was
    never enforced. Declaring the metaclass in the class header makes
    instantiating a subclass without :meth:`forward` raise ``TypeError``.
    """

    def __init__(self, **kwargs):
        """Reject any options the concrete encoder did not consume.

        :param kwargs: options left over after subclasses popped theirs
        :raises RuntimeError: if any unrecognized option remains
        """
        if len(kwargs) > 0:
            raise RuntimeError(
                "Unrecognized options: {}".format(', '.join(kwargs.keys())))
        super(BaseEncoder, self).__init__()

    @abstractmethod
    def forward(self, features, features_lengths, spkids):
        """ Encode a minibatch of audio features

        :param features: float32 tensor of size (bs x t x f x c)
        :param features_lengths: int64 tensor of size (bs)
        :param spkids: string id of speakers
        :returns: A tuple with elements:
            - encoded: float32 tensor of size (t x bs x d)
            - encoded_lens: int64 tensor of size (bs)
        """
        pass

    def get_parameters_for_optimizer(self):
        """Return the parameters the optimizer should update.

        Subclasses may override this to freeze or regroup parameters.
        """
        return self.parameters()
|
flexible
|
{
"blob_id": "86ee2300b5270df3dadb22f2cfea626e6556e5db",
"index": 9951,
"step-1": "<mask token>\n\n\nclass BaseEncoder(nn.Module):\n <mask token>\n\n def __init__(self, **kwargs):\n if len(kwargs) > 0:\n raise RuntimeError('Unrecognized options: {}'.format(', '.join(\n kwargs.keys())))\n super(BaseEncoder, self).__init__()\n <mask token>\n\n def get_parameters_for_optimizer(self):\n return self.parameters()\n",
"step-2": "<mask token>\n\n\nclass BaseEncoder(nn.Module):\n <mask token>\n\n def __init__(self, **kwargs):\n if len(kwargs) > 0:\n raise RuntimeError('Unrecognized options: {}'.format(', '.join(\n kwargs.keys())))\n super(BaseEncoder, self).__init__()\n\n @abstractmethod\n def forward(self, features, features_lengths, spkids):\n \"\"\" Encode a minibatch of audio features\n\n :param features: float32 tensor of size (bs x t x f x c)\n :param features_lengths: int64 tensor of size (bs)\n :param spkids: string id of speakers\n :returns: A tuple with elements:\n - encoded: float32 tensor of size (t x bs x d)\n - encoded_lens: int64 tensor of size (bs)\n \"\"\"\n pass\n\n def get_parameters_for_optimizer(self):\n return self.parameters()\n",
"step-3": "<mask token>\n\n\nclass BaseEncoder(nn.Module):\n __metaclass__ = ABCMeta\n\n def __init__(self, **kwargs):\n if len(kwargs) > 0:\n raise RuntimeError('Unrecognized options: {}'.format(', '.join(\n kwargs.keys())))\n super(BaseEncoder, self).__init__()\n\n @abstractmethod\n def forward(self, features, features_lengths, spkids):\n \"\"\" Encode a minibatch of audio features\n\n :param features: float32 tensor of size (bs x t x f x c)\n :param features_lengths: int64 tensor of size (bs)\n :param spkids: string id of speakers\n :returns: A tuple with elements:\n - encoded: float32 tensor of size (t x bs x d)\n - encoded_lens: int64 tensor of size (bs)\n \"\"\"\n pass\n\n def get_parameters_for_optimizer(self):\n return self.parameters()\n",
"step-4": "from torch import nn\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseEncoder(nn.Module):\n __metaclass__ = ABCMeta\n\n def __init__(self, **kwargs):\n if len(kwargs) > 0:\n raise RuntimeError('Unrecognized options: {}'.format(', '.join(\n kwargs.keys())))\n super(BaseEncoder, self).__init__()\n\n @abstractmethod\n def forward(self, features, features_lengths, spkids):\n \"\"\" Encode a minibatch of audio features\n\n :param features: float32 tensor of size (bs x t x f x c)\n :param features_lengths: int64 tensor of size (bs)\n :param spkids: string id of speakers\n :returns: A tuple with elements:\n - encoded: float32 tensor of size (t x bs x d)\n - encoded_lens: int64 tensor of size (bs)\n \"\"\"\n pass\n\n def get_parameters_for_optimizer(self):\n return self.parameters()\n",
"step-5": "from torch import nn\nfrom abc import ABCMeta, abstractmethod\n\nclass BaseEncoder(nn.Module):\n __metaclass__ = ABCMeta\n\n def __init__(self, **kwargs):\n if len(kwargs) > 0:\n raise RuntimeError(\n \"Unrecognized options: {}\".format(', '.join(kwargs.keys())))\n super(BaseEncoder, self).__init__()\n\n @abstractmethod\n def forward(self, features, features_lengths, spkids):\n \"\"\" Encode a minibatch of audio features\n\n :param features: float32 tensor of size (bs x t x f x c)\n :param features_lengths: int64 tensor of size (bs)\n :param spkids: string id of speakers\n :returns: A tuple with elements:\n - encoded: float32 tensor of size (t x bs x d)\n - encoded_lens: int64 tensor of size (bs)\n \"\"\"\n pass\n\n def get_parameters_for_optimizer(self):\n return self.parameters()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Root(object):
<|reserved_special_token_0|>
def __init__(self, request):
pass
<|reserved_special_token_1|>
__author__ = 'anderson'
<|reserved_special_token_0|>
class Root(object):
__acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',
ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]
def __init__(self, request):
pass
<|reserved_special_token_1|>
__author__ = 'anderson'
from pyramid.security import Everyone, Allow, ALL_PERMISSIONS
class Root(object):
__acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',
ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]
def __init__(self, request):
pass
<|reserved_special_token_1|>
__author__ = 'anderson'
from pyramid.security import Everyone, Allow, ALL_PERMISSIONS
class Root(object):
    """Pyramid root resource factory carrying the application's ACL."""

    # Access Control List: anyone may 'view'; admins get every
    # permission; regular users get the 'comum' (common) permission.
    __acl__ = [
        (Allow, Everyone, 'view'),
        (Allow, 'role_admin', ALL_PERMISSIONS),
        (Allow, 'role_usuario', 'comum'),
    ]

    def __init__(self, request):
        # Pyramid hands the request to the root factory; nothing from it
        # is needed here.
        pass
|
flexible
|
{
"blob_id": "5ee2a51ea981f0feab688d9c571620a95d89a422",
"index": 6980,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Root(object):\n <mask token>\n\n def __init__(self, request):\n pass\n",
"step-3": "__author__ = 'anderson'\n<mask token>\n\n\nclass Root(object):\n __acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',\n ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-4": "__author__ = 'anderson'\nfrom pyramid.security import Everyone, Allow, ALL_PERMISSIONS\n\n\nclass Root(object):\n __acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',\n ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-5": "__author__ = 'anderson'\nfrom pyramid.security import Everyone, Allow, ALL_PERMISSIONS\n\n\nclass Root(object):\n #Access Control List\n __acl__ = [(Allow, Everyone, 'view'),\n (Allow, 'role_admin', ALL_PERMISSIONS),\n (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-ids": [
0,
2,
4,
5,
6
]
}
|
[
0,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
class Cart(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
ordering = ['status', 'creation_date']
db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)
verbose_name = _('oscm_admin_headerOfCart')
verbose_name_plural = _('oscm_admin_headerOfCarts')
<|reserved_special_token_0|>
def __str__(self):
"""
Displays the status, the owner, the project
name and the number of cart items.
"""
return _(
'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'
) % {'status': self.CART_STATUSES[self.status][1], 'owner':
self.owner, 'project_name': self.project_name, 'nb_cart_items':
self.nb_cart_items, 'total_amount': self.total_amount}
def get_cart_items(self):
"""
Retrieves all cart items for a given cart.
"""
return CartItem.objects.filter(cart=self)
@property
def nb_cart_items(self):
"""
Retrieves the number of distinct cart items for a given cart.
"""
return CartItem.objects.filter(cart=self).count()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_absolute_url(self):
return reverse('oscm:cart', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('oscm:delete_cart', kwargs={'pk': self.pk})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cart(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
ordering = ['status', 'creation_date']
db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)
verbose_name = _('oscm_admin_headerOfCart')
verbose_name_plural = _('oscm_admin_headerOfCarts')
<|reserved_special_token_0|>
def __str__(self):
"""
Displays the status, the owner, the project
name and the number of cart items.
"""
return _(
'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'
) % {'status': self.CART_STATUSES[self.status][1], 'owner':
self.owner, 'project_name': self.project_name, 'nb_cart_items':
self.nb_cart_items, 'total_amount': self.total_amount}
def get_cart_items(self):
"""
Retrieves all cart items for a given cart.
"""
return CartItem.objects.filter(cart=self)
@property
def nb_cart_items(self):
"""
Retrieves the number of distinct cart items for a given cart.
"""
return CartItem.objects.filter(cart=self).count()
@property
def total_amount(self):
"""
Retrieves the total amount of cart items for a given cart.
"""
total_amount = 0
for cart_item in self.get_cart_items():
total_amount += cart_item.total_price
return total_amount
@property
def is_empty(self):
"""
Test if this cart is empty.
"""
return self.id is None or self.nb_cart_items == 0
def get_absolute_url(self):
return reverse('oscm:cart', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('oscm:delete_cart', kwargs={'pk': self.pk})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cart(models.Model):
<|reserved_special_token_0|>
owner = models.ForeignKey(get_attr('AUTH_USER_MODEL'), blank=False,
related_name='carts', verbose_name=_('oscm_admin_ownerOfCart'),
help_text=_('oscm_admin_helpTextOwnerOfCart'), limit_choices_to={
'role': get_attr('DEFAULT_ROLE')})
project_name = models.CharField(verbose_name=_(
'oscm_admin_projectNameOfCart'), help_text=_(
'oscm_admin_helpTextProjectNameOfCart'), max_length=250, blank=
False, null=False)
creation_date = models.DateTimeField(verbose_name=_(
'oscm_admin_creationDateOfCart'), auto_now_add=True)
last_edit_date = models.DateTimeField(verbose_name=_(
'oscm_admin_lastEditDateOfCart'), auto_now=True)
requested_due_date = models.DateTimeField(verbose_name=_(
'oscm_admin_requestedDueDateOfCart'), help_text=_(
'oscm_admin_helpTextRequestedDueDateOfCart'))
DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)
CART_STATUSES = get_attr(CART_STATUSES)
status = models.IntegerField(verbose_name=_('oscm_admin_statusOfCart'),
max_length=32, default=DEFAULT_CART_STATUS, choices=CART_STATUSES,
help_text=_('oscm_admin_helpTextStatusOfCart'))
description = models.TextField(verbose_name=_(
'oscm_admin_descriptionOfCart'), blank=True, help_text=_(
'oscm_admin_helpTextDescriptionOfCart'))
class Meta:
ordering = ['status', 'creation_date']
db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)
verbose_name = _('oscm_admin_headerOfCart')
verbose_name_plural = _('oscm_admin_headerOfCarts')
objects = CartQuerySet.as_manager()
def __str__(self):
"""
Displays the status, the owner, the project
name and the number of cart items.
"""
return _(
'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'
) % {'status': self.CART_STATUSES[self.status][1], 'owner':
self.owner, 'project_name': self.project_name, 'nb_cart_items':
self.nb_cart_items, 'total_amount': self.total_amount}
def get_cart_items(self):
"""
Retrieves all cart items for a given cart.
"""
return CartItem.objects.filter(cart=self)
@property
def nb_cart_items(self):
"""
Retrieves the number of distinct cart items for a given cart.
"""
return CartItem.objects.filter(cart=self).count()
@property
def total_amount(self):
"""
Retrieves the total amount of cart items for a given cart.
"""
total_amount = 0
for cart_item in self.get_cart_items():
total_amount += cart_item.total_price
return total_amount
@property
def is_empty(self):
"""
Test if this cart is empty.
"""
return self.id is None or self.nb_cart_items == 0
def get_absolute_url(self):
return reverse('oscm:cart', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('oscm:delete_cart', kwargs={'pk': self.pk})
<|reserved_special_token_1|>
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ...constants import CARTS, CART_STATUSES, DEFAULT_CART_STATUS
from ...utils import get_attr
from ..cart_manager import CartQuerySet
from .cart_item import CartItem
class Cart(models.Model):
"""
This class is used to represent the Cart for the users.
"""
owner = models.ForeignKey(get_attr('AUTH_USER_MODEL'), blank=False,
related_name='carts', verbose_name=_('oscm_admin_ownerOfCart'),
help_text=_('oscm_admin_helpTextOwnerOfCart'), limit_choices_to={
'role': get_attr('DEFAULT_ROLE')})
project_name = models.CharField(verbose_name=_(
'oscm_admin_projectNameOfCart'), help_text=_(
'oscm_admin_helpTextProjectNameOfCart'), max_length=250, blank=
False, null=False)
creation_date = models.DateTimeField(verbose_name=_(
'oscm_admin_creationDateOfCart'), auto_now_add=True)
last_edit_date = models.DateTimeField(verbose_name=_(
'oscm_admin_lastEditDateOfCart'), auto_now=True)
requested_due_date = models.DateTimeField(verbose_name=_(
'oscm_admin_requestedDueDateOfCart'), help_text=_(
'oscm_admin_helpTextRequestedDueDateOfCart'))
DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)
CART_STATUSES = get_attr(CART_STATUSES)
status = models.IntegerField(verbose_name=_('oscm_admin_statusOfCart'),
max_length=32, default=DEFAULT_CART_STATUS, choices=CART_STATUSES,
help_text=_('oscm_admin_helpTextStatusOfCart'))
description = models.TextField(verbose_name=_(
'oscm_admin_descriptionOfCart'), blank=True, help_text=_(
'oscm_admin_helpTextDescriptionOfCart'))
class Meta:
ordering = ['status', 'creation_date']
db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)
verbose_name = _('oscm_admin_headerOfCart')
verbose_name_plural = _('oscm_admin_headerOfCarts')
objects = CartQuerySet.as_manager()
def __str__(self):
"""
Displays the status, the owner, the project
name and the number of cart items.
"""
return _(
'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'
) % {'status': self.CART_STATUSES[self.status][1], 'owner':
self.owner, 'project_name': self.project_name, 'nb_cart_items':
self.nb_cart_items, 'total_amount': self.total_amount}
def get_cart_items(self):
"""
Retrieves all cart items for a given cart.
"""
return CartItem.objects.filter(cart=self)
@property
def nb_cart_items(self):
"""
Retrieves the number of distinct cart items for a given cart.
"""
return CartItem.objects.filter(cart=self).count()
@property
def total_amount(self):
"""
Retrieves the total amount of cart items for a given cart.
"""
total_amount = 0
for cart_item in self.get_cart_items():
total_amount += cart_item.total_price
return total_amount
@property
def is_empty(self):
"""
Test if this cart is empty.
"""
return self.id is None or self.nb_cart_items == 0
def get_absolute_url(self):
return reverse('oscm:cart', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('oscm:delete_cart', kwargs={'pk': self.pk})
<|reserved_special_token_1|>
# coding=utf-8
# oscm_app/cart/models
# django imports
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
# OSCM imports
from ...constants import CARTS, CART_STATUSES, DEFAULT_CART_STATUS
from ...utils import get_attr
from ..cart_manager import CartQuerySet
from .cart_item import CartItem
class Cart(models.Model):
    """
    Shopping cart tied to a single owner and project.

    Aggregates :class:`CartItem` rows (related via ``CartItem.cart``) and
    exposes convenience accessors for the item count and total amount.
    """
    # Owner of the cart; selectable users are restricted to those holding
    # the configured default role (both values come from settings via
    # ``get_attr``).
    owner = models.ForeignKey(
        get_attr('AUTH_USER_MODEL'),
        blank=False,
        related_name='carts',
        verbose_name=_("oscm_admin_ownerOfCart"),
        help_text=_('oscm_admin_helpTextOwnerOfCart'),
        limit_choices_to={'role': get_attr('DEFAULT_ROLE')},
    )
    # Free-form project name this cart belongs to (required).
    project_name = models.CharField(
        verbose_name=_('oscm_admin_projectNameOfCart'),
        help_text=_('oscm_admin_helpTextProjectNameOfCart'),
        max_length=250,
        blank=False,
        null=False
    )
    # Creation date: set once when the row is first saved (auto_now_add).
    creation_date = models.DateTimeField(
        verbose_name=_('oscm_admin_creationDateOfCart'),
        auto_now_add=True,
    )
    # Last edit date: refreshed on every save (auto_now).
    last_edit_date = models.DateTimeField(
        verbose_name=_('oscm_admin_lastEditDateOfCart'),
        auto_now=True,
    )
    # Requested due date: no auto value, must be supplied by the caller.
    requested_due_date = models.DateTimeField(
        verbose_name=_('oscm_admin_requestedDueDateOfCart'),
        help_text=_('oscm_admin_helpTextRequestedDueDateOfCart'),
    )
    # Default parameter for the status attribute.
    # Note: this class attribute shadows the imported module-level
    # constant of the same name from this line onward.
    DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)
    # Statuses retrieved from the settings file; expected to be a
    # sequence of (value, label) pairs, as used by ``choices=`` below.
    CART_STATUSES = get_attr(CART_STATUSES)
    # Status, stored as an integer drawn from CART_STATUSES.
    # NOTE(review): ``max_length`` is not honored by IntegerField —
    # confirm whether this was meant for a CharField.
    status = models.IntegerField(
        verbose_name=_('oscm_admin_statusOfCart'),
        max_length=32,
        default=DEFAULT_CART_STATUS,
        choices=CART_STATUSES,
        help_text=_('oscm_admin_helpTextStatusOfCart'),
    )
    # Short, optional description of the cart.
    description = models.TextField(
        verbose_name=_("oscm_admin_descriptionOfCart"),
        blank=True,
        help_text=_('oscm_admin_helpTextDescriptionOfCart'),
    )
    # Item count (not equal to quantity, but distinct item count)

    class Meta:
        # Sort carts by status first, then by oldest creation date.
        ordering = ["status", "creation_date", ]
        db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)
        verbose_name = _('oscm_admin_headerOfCart')
        verbose_name_plural = _('oscm_admin_headerOfCarts')

    # Custom queryset exposed as the default manager.
    objects = CartQuerySet.as_manager()

    def __str__(self):
        """
        Displays the status, the owner, the project
        name and the number of cart items.
        """
        # CART_STATUSES holds (value, label) pairs; index by the stored
        # status value and take element [1] for the readable label.
        return _(
            "cart (status: %(status)s, owner: %(owner)s, project name: "
            "%(project_name)s, number of cart items: %(nb_cart_items)d, "
            "total amount: %(total_amount)d)"
        ) % {
            'status': self.CART_STATUSES[self.status][1],
            'owner': self.owner,
            'project_name': self.project_name,
            'nb_cart_items': self.nb_cart_items,
            'total_amount': self.total_amount,
        }

    def get_cart_items(self):
        """
        Retrieves all cart items for a given cart.

        :return: queryset of CartItem rows referencing this cart
        """
        return CartItem.objects.filter(cart=self)

    @property
    def nb_cart_items(self):
        """
        Retrieves the number of distinct cart items for a given cart.
        """
        return CartItem.objects.filter(cart=self).count()

    @property
    def total_amount(self):
        """
        Retrieves the total amount of cart items for a given cart.
        """
        # Sum of per-item totals; each CartItem exposes ``total_price``.
        total_amount = 0
        for cart_item in self.get_cart_items():
            total_amount += cart_item.total_price
        return total_amount

    @property
    def is_empty(self):
        """
        Test if this cart is empty.

        An unsaved cart (``id is None``) is considered empty as well.
        """
        return self.id is None or self.nb_cart_items == 0

    def get_absolute_url(self):
        """Return the detail-view URL for this cart."""
        return reverse(
            'oscm:cart',
            kwargs={'pk': self.pk})

    def get_delete_url(self):
        """Return the deletion URL for this cart."""
        return reverse(
            'oscm:delete_cart',
            kwargs={'pk': self.pk})
|
flexible
|
{
"blob_id": "ae0ccbb9b0a2c61d9ee9615ba8d0c1a186a81c34",
"index": 3177,
"step-1": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n <mask token>\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-2": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n <mask token>\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-3": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n owner = models.ForeignKey(get_attr('AUTH_USER_MODEL'), blank=False,\n related_name='carts', verbose_name=_('oscm_admin_ownerOfCart'),\n help_text=_('oscm_admin_helpTextOwnerOfCart'), limit_choices_to={\n 'role': get_attr('DEFAULT_ROLE')})\n project_name = models.CharField(verbose_name=_(\n 'oscm_admin_projectNameOfCart'), help_text=_(\n 'oscm_admin_helpTextProjectNameOfCart'), max_length=250, blank=\n False, null=False)\n creation_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_creationDateOfCart'), auto_now_add=True)\n last_edit_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_lastEditDateOfCart'), auto_now=True)\n requested_due_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_requestedDueDateOfCart'), help_text=_(\n 'oscm_admin_helpTextRequestedDueDateOfCart'))\n DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)\n CART_STATUSES = get_attr(CART_STATUSES)\n status = models.IntegerField(verbose_name=_('oscm_admin_statusOfCart'),\n max_length=32, default=DEFAULT_CART_STATUS, choices=CART_STATUSES,\n help_text=_('oscm_admin_helpTextStatusOfCart'))\n description = models.TextField(verbose_name=_(\n 'oscm_admin_descriptionOfCart'), blank=True, help_text=_(\n 'oscm_admin_helpTextDescriptionOfCart'))\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n objects = CartQuerySet.as_manager()\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n 
self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-4": "from django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom ...constants import CARTS, CART_STATUSES, DEFAULT_CART_STATUS\nfrom ...utils import get_attr\nfrom ..cart_manager import CartQuerySet\nfrom .cart_item import CartItem\n\n\nclass Cart(models.Model):\n \"\"\"\n This class is used to represent the Cart for the users.\n \"\"\"\n owner = models.ForeignKey(get_attr('AUTH_USER_MODEL'), blank=False,\n related_name='carts', verbose_name=_('oscm_admin_ownerOfCart'),\n help_text=_('oscm_admin_helpTextOwnerOfCart'), limit_choices_to={\n 'role': get_attr('DEFAULT_ROLE')})\n project_name = models.CharField(verbose_name=_(\n 'oscm_admin_projectNameOfCart'), help_text=_(\n 'oscm_admin_helpTextProjectNameOfCart'), max_length=250, blank=\n False, null=False)\n creation_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_creationDateOfCart'), auto_now_add=True)\n last_edit_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_lastEditDateOfCart'), auto_now=True)\n requested_due_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_requestedDueDateOfCart'), help_text=_(\n 'oscm_admin_helpTextRequestedDueDateOfCart'))\n DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)\n CART_STATUSES = get_attr(CART_STATUSES)\n status = models.IntegerField(verbose_name=_('oscm_admin_statusOfCart'),\n max_length=32, default=DEFAULT_CART_STATUS, choices=CART_STATUSES,\n help_text=_('oscm_admin_helpTextStatusOfCart'))\n description = models.TextField(verbose_name=_(\n 'oscm_admin_descriptionOfCart'), blank=True, help_text=_(\n 'oscm_admin_helpTextDescriptionOfCart'))\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n objects = CartQuerySet.as_manager()\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n 
name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-5": "# coding=utf-8\n# oscm_app/cart/models\n\n# django imports\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n# OSCM imports\nfrom ...constants import CARTS, CART_STATUSES, DEFAULT_CART_STATUS\nfrom ...utils import get_attr\nfrom ..cart_manager import CartQuerySet\nfrom .cart_item import CartItem\n\n\nclass Cart(models.Model):\n\n \"\"\"\n This class is used to represent the Cart for the users.\n \"\"\"\n\n # Owner of the cart\n owner = models.ForeignKey(\n get_attr('AUTH_USER_MODEL'),\n blank=False,\n related_name='carts',\n verbose_name=_(\"oscm_admin_ownerOfCart\"),\n help_text=_('oscm_admin_helpTextOwnerOfCart'),\n limit_choices_to={'role': get_attr('DEFAULT_ROLE')},\n )\n\n # Project name\n project_name = models.CharField(\n verbose_name=_('oscm_admin_projectNameOfCart'),\n help_text=_('oscm_admin_helpTextProjectNameOfCart'),\n max_length=250,\n blank=False,\n null=False\n )\n\n # Creation date\n creation_date = models.DateTimeField(\n verbose_name=_('oscm_admin_creationDateOfCart'),\n auto_now_add=True,\n )\n # Last edit date\n last_edit_date = models.DateTimeField(\n verbose_name=_('oscm_admin_lastEditDateOfCart'),\n auto_now=True,\n )\n # Requested due date\n requested_due_date = models.DateTimeField(\n verbose_name=_('oscm_admin_requestedDueDateOfCart'),\n help_text=_('oscm_admin_helpTextRequestedDueDateOfCart'),\n )\n\n # Default parameter for the status attribute\n DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)\n # Retrieved the different statuses from the settings file\n CART_STATUSES = get_attr(CART_STATUSES)\n # Status\n status = models.IntegerField(\n verbose_name=_('oscm_admin_statusOfCart'),\n max_length=32,\n default=DEFAULT_CART_STATUS,\n choices=CART_STATUSES,\n help_text=_('oscm_admin_helpTextStatusOfCart'),\n )\n # Short description about the cart\n description = models.TextField(\n verbose_name=_(\"oscm_admin_descriptionOfCart\"),\n 
blank=True,\n help_text=_('oscm_admin_helpTextDescriptionOfCart'),\n )\n # Item count (not equal to quantity, but distinct item count)\n\n class Meta:\n ordering = [\"status\", \"creation_date\", ]\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n\n objects = CartQuerySet.as_manager()\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n \"cart (status: %(status)s, owner: %(owner)s, project name: \"\n \"%(project_name)s, number of cart items: %(nb_cart_items)d, \"\n \"total amount: %(total_amount)d)\"\n ) % {\n 'status': self.CART_STATUSES[self.status][1],\n 'owner': self.owner,\n 'project_name': self.project_name,\n 'nb_cart_items': self.nb_cart_items,\n 'total_amount': self.total_amount,\n }\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse(\n 'oscm:cart',\n kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse(\n 'oscm:delete_cart',\n kwargs={'pk': self.pk})\n",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
class BlockwiseLayer(object):
<|reserved_special_token_0|>
def __init__(self, parent):
"""
Initialize a Blockwise Layer.
:type parent: coapserver.CoAP
:param parent: the CoAP server
"""
self._parent = parent
def handle_request(self, request):
"""
Store Blockwise parameter required by clients
:param request: the request message
:return: M bit, request
"""
ret = True
for option in request.options:
if option.number == defines.inv_options['Block2']:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
if key in self._parent.blockwise:
block, byte, num2, m2, size2 = self._parent.blockwise[key]
if block == 2:
self._parent.blockwise[key] = 2, byte, num, m, size
else:
self._parent.blockwise[key] = 2, 0, num, m, size
else:
self._parent.blockwise[key] = 2, 0, num, m, size
elif option.number == defines.inv_options['Block1']:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
self._parent.blockwise[key] = 1, 0, num, m, size
if m == 0:
del self._parent.blockwise[key]
ret = False
return ret, request
def start_block2(self, request):
"""
Initialize a blockwise response. Used if payload > 1024
:param request: the request message
"""
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
self._parent.blockwise[key] = 2, 0, 0, 1, 1024
def handle_response(self, key, response, resource):
"""
Handle Blockwise in responses.
:param key: key parameter to search inside the dictionary
:param response: the response message
:param resource: the request message
:return: the new response
"""
block, byte, num, m, size = self._parent.blockwise[key]
payload = resource.payload
if block == 2:
ret = payload[byte:byte + size]
if len(ret) == size:
m = 1
else:
m = 0
response.block2 = num, m, size
response.payload = ret
byte += size
num += 1
if m == 0:
del self._parent.blockwise[key]
else:
self._parent.blockwise[key] = 2, byte, num, m, size
elif block == 1:
if m == 1:
response.code = defines.responses['CONTINUE']
response.block1 = num, m, size
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BlockwiseLayer(object):
"""
Handles the Blockwise feature.
"""
def __init__(self, parent):
"""
Initialize a Blockwise Layer.
:type parent: coapserver.CoAP
:param parent: the CoAP server
"""
self._parent = parent
def handle_request(self, request):
"""
Store Blockwise parameter required by clients
:param request: the request message
:return: M bit, request
"""
ret = True
for option in request.options:
if option.number == defines.inv_options['Block2']:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
if key in self._parent.blockwise:
block, byte, num2, m2, size2 = self._parent.blockwise[key]
if block == 2:
self._parent.blockwise[key] = 2, byte, num, m, size
else:
self._parent.blockwise[key] = 2, 0, num, m, size
else:
self._parent.blockwise[key] = 2, 0, num, m, size
elif option.number == defines.inv_options['Block1']:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
self._parent.blockwise[key] = 1, 0, num, m, size
if m == 0:
del self._parent.blockwise[key]
ret = False
return ret, request
def start_block2(self, request):
"""
Initialize a blockwise response. Used if payload > 1024
:param request: the request message
"""
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
self._parent.blockwise[key] = 2, 0, 0, 1, 1024
def handle_response(self, key, response, resource):
"""
Handle Blockwise in responses.
:param key: key parameter to search inside the dictionary
:param response: the response message
:param resource: the request message
:return: the new response
"""
block, byte, num, m, size = self._parent.blockwise[key]
payload = resource.payload
if block == 2:
ret = payload[byte:byte + size]
if len(ret) == size:
m = 1
else:
m = 0
response.block2 = num, m, size
response.payload = ret
byte += size
num += 1
if m == 0:
del self._parent.blockwise[key]
else:
self._parent.blockwise[key] = 2, byte, num, m, size
elif block == 1:
if m == 1:
response.code = defines.responses['CONTINUE']
response.block1 = num, m, size
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Giacomo Tanganelli'
__version__ = '2.0'
class BlockwiseLayer(object):
"""
Handles the Blockwise feature.
"""
def __init__(self, parent):
"""
Initialize a Blockwise Layer.
:type parent: coapserver.CoAP
:param parent: the CoAP server
"""
self._parent = parent
def handle_request(self, request):
"""
Store Blockwise parameter required by clients
:param request: the request message
:return: M bit, request
"""
ret = True
for option in request.options:
if option.number == defines.inv_options['Block2']:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
if key in self._parent.blockwise:
block, byte, num2, m2, size2 = self._parent.blockwise[key]
if block == 2:
self._parent.blockwise[key] = 2, byte, num, m, size
else:
self._parent.blockwise[key] = 2, 0, num, m, size
else:
self._parent.blockwise[key] = 2, 0, num, m, size
elif option.number == defines.inv_options['Block1']:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
self._parent.blockwise[key] = 1, 0, num, m, size
if m == 0:
del self._parent.blockwise[key]
ret = False
return ret, request
def start_block2(self, request):
"""
Initialize a blockwise response. Used if payload > 1024
:param request: the request message
"""
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
self._parent.blockwise[key] = 2, 0, 0, 1, 1024
def handle_response(self, key, response, resource):
"""
Handle Blockwise in responses.
:param key: key parameter to search inside the dictionary
:param response: the response message
:param resource: the request message
:return: the new response
"""
block, byte, num, m, size = self._parent.blockwise[key]
payload = resource.payload
if block == 2:
ret = payload[byte:byte + size]
if len(ret) == size:
m = 1
else:
m = 0
response.block2 = num, m, size
response.payload = ret
byte += size
num += 1
if m == 0:
del self._parent.blockwise[key]
else:
self._parent.blockwise[key] = 2, byte, num, m, size
elif block == 1:
if m == 1:
response.code = defines.responses['CONTINUE']
response.block1 = num, m, size
return response
<|reserved_special_token_1|>
import struct
from coapthon import defines
from coapthon.utils import byte_len, bit_len, parse_blockwise
__author__ = 'Giacomo Tanganelli'
__version__ = '2.0'
class BlockwiseLayer(object):
    """
    Handles the Blockwise feature.

    Per-exchange transfer state lives in ``self._parent.blockwise``, a dict
    mapping ``hash(host + port + token)`` to a 5-tuple:
    ``(block_option, byte_offset, block_number, more_bit, block_size)``
    where ``block_option`` is 2 for blockwise responses (Block2) and 1 for
    blockwise uploads (Block1).
    """
    def __init__(self, parent):
        """
        Initialize a Blockwise Layer.
        :type parent: coapserver.CoAP
        :param parent: the CoAP server; its ``blockwise`` dict stores the state
        """
        self._parent = parent
    def handle_request(self, request):
        """
        Store Blockwise parameter required by clients
        :param request: the request message
        :return: M bit (False once a Block1 upload is complete), request
        """
        ret = True
        for option in request.options:
            if option.number == defines.inv_options['Block2']:
                # Block2: the client dictates how the *response* is sliced.
                host, port = request.source
                # One state entry per (host, port, token) exchange.
                key = hash(str(host) + str(port) + str(request.token))
                num, m, size = parse_blockwise(option.raw_value)
                if key in self._parent.blockwise:
                    block, byte, num2, m2, size2 = self._parent.blockwise[key]
                    if block == 2:
                        # Ongoing Block2 transfer: keep the byte offset,
                        # adopt the client's latest num/m/size.
                        self._parent.blockwise[key] = 2, byte, num, m, size
                    else:
                        # Stale Block1 entry: restart Block2 at offset 0.
                        self._parent.blockwise[key] = 2, 0, num, m, size
                else:
                    self._parent.blockwise[key] = 2, 0, num, m, size
            elif option.number == defines.inv_options['Block1']:
                # Block1: the client uploads its payload in blocks.
                host, port = request.source
                key = hash(str(host) + str(port) + str(request.token))
                num, m, size = parse_blockwise(option.raw_value)
                self._parent.blockwise[key] = 1, 0, num, m, size
                if m == 0:
                    # Final upload block received: discard state and tell the
                    # caller (ret=False) the request body is complete.
                    del self._parent.blockwise[key]
                    ret = False
        return ret, request
    def start_block2(self, request):
        """
        Initialize a blockwise response. Used if payload > 1024
        :param request: the request message
        """
        host, port = request.source
        key = hash(str(host) + str(port) + str(request.token))
        # Start at block 0, offset 0, more=1, with the default 1024-byte size.
        self._parent.blockwise[key] = 2, 0, 0, 1, 1024
    def handle_response(self, key, response, resource):
        """
        Handle Blockwise in responses.
        :param key: key parameter to search inside the dictionary
        :param response: the response message
        :param resource: the resource whose payload is being transferred
        :return: the new response
        """
        block, byte, num, m, size = self._parent.blockwise[key]
        payload = resource.payload
        if block == 2:
            # Slice the next chunk of the payload for this Block2 exchange.
            ret = payload[byte:byte + size]
            if len(ret) == size:
                # Full-sized chunk: assume more data follows.
                # NOTE(review): when the payload length is an exact multiple
                # of `size`, this sends one extra empty block before m drops
                # to 0 — confirm this is the intended wire behavior.
                m = 1
            else:
                m = 0
            response.block2 = num, m, size
            response.payload = ret
            # Advance the cursor for the next block request.
            byte += size
            num += 1
            if m == 0:
                # Transfer finished: drop the per-exchange state.
                del self._parent.blockwise[key]
            else:
                self._parent.blockwise[key] = 2, byte, num, m, size
        elif block == 1:
            if m == 1:
                # More upload blocks expected: ask the client to continue.
                response.code = defines.responses['CONTINUE']
                response.block1 = num, m, size
        return response
<|reserved_special_token_1|>
import struct
from coapthon import defines
from coapthon.utils import byte_len, bit_len, parse_blockwise
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class BlockwiseLayer(object):
    """
    Handles the Blockwise feature.

    Per-exchange state is kept in ``self._parent.blockwise``, a dict mapping
    ``hash(host + port + token)`` to a 5-tuple
    ``(block_option, byte_offset, block_number, more_bit, block_size)``,
    where ``block_option`` is 2 for blockwise responses (Block2) and 1 for
    blockwise uploads (Block1).
    """

    def __init__(self, parent):
        """
        Initialize a Blockwise Layer.

        :type parent: coapserver.CoAP
        :param parent: the CoAP server; its ``blockwise`` dict holds the state
        """
        self._parent = parent

    @staticmethod
    def _exchange_key(request):
        """Derive the state-dictionary key identifying one client exchange."""
        src_host, src_port = request.source
        return hash(str(src_host) + str(src_port) + str(request.token))

    def handle_request(self, request):
        """
        Store Blockwise parameter required by clients.

        :param request: the request message
        :return: (M bit, request) — M is False once a Block1 upload completes
        """
        more = True
        for opt in request.options:
            if opt.number == defines.inv_options["Block2"]:
                # Block2: the client dictates how the response is sliced.
                key = self._exchange_key(request)
                num, m, size = parse_blockwise(opt.raw_value)
                # remember choices
                if key in self._parent.blockwise and self._parent.blockwise[key][0] == 2:
                    # Ongoing Block2 transfer: keep the byte offset.
                    offset = self._parent.blockwise[key][1]
                    self._parent.blockwise[key] = (2, offset, num, m, size)
                else:
                    # New (or previously Block1) exchange: start at offset 0.
                    self._parent.blockwise[key] = (2, 0, num, m, size)
            elif opt.number == defines.inv_options["Block1"]:
                # Block1: the client uploads its payload in blocks.
                key = self._exchange_key(request)
                num, m, size = parse_blockwise(opt.raw_value)
                # remember choices
                self._parent.blockwise[key] = (1, 0, num, m, size)
                if m == 0:
                    # Final upload block: drop state and report completion.
                    del self._parent.blockwise[key]
                    more = False
        return more, request

    def start_block2(self, request):
        """
        Initialize a blockwise response. Used if payload > 1024

        :param request: the request message
        """
        self._parent.blockwise[self._exchange_key(request)] = (2, 0, 0, 1, 1024)

    def handle_response(self, key, response, resource):
        """
        Handle Blockwise in responses.

        :param key: key parameter to search inside the dictionary
        :param response: the response message
        :param resource: the resource whose payload is being transferred
        :return: the new response
        """
        block, byte, num, m, size = self._parent.blockwise[key]
        payload = resource.payload
        if block == 2:
            # Slice the next chunk for this Block2 exchange; a full-sized
            # chunk means more data may follow (M bit set).
            chunk = payload[byte:byte + size]
            m = 1 if len(chunk) == size else 0
            response.block2 = (num, m, size)
            response.payload = chunk
            if m == 0:
                # Transfer finished: discard the per-exchange state.
                del self._parent.blockwise[key]
            else:
                # Advance the cursor for the next block request.
                self._parent.blockwise[key] = (2, byte + size, num + 1, m, size)
        elif block == 1:
            if m == 1:
                # More upload blocks expected: ask the client to continue.
                response.code = defines.responses["CONTINUE"]
                response.block1 = (num, m, size)
        return response
|
flexible
|
{
"blob_id": "70d740a7003ca3f2d2cde039b2fc470ef2165e77",
"index": 7078,
"step-1": "<mask token>\n\n\nclass BlockwiseLayer(object):\n <mask token>\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-2": "<mask token>\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-3": "<mask token>\n__author__ = 'Giacomo Tanganelli'\n__version__ = '2.0'\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-4": "import struct\nfrom coapthon import defines\nfrom coapthon.utils import byte_len, bit_len, parse_blockwise\n__author__ = 'Giacomo Tanganelli'\n__version__ = '2.0'\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-5": "import struct\nfrom coapthon import defines\nfrom coapthon.utils import byte_len, bit_len, parse_blockwise\n\n__author__ = 'Giacomo Tanganelli'\n__version__ = \"2.0\"\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options[\"Block2\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n elif option.number == defines.inv_options[\"Block1\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n self._parent.blockwise[key] = (1, 0, num, m, size)\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = (2, 0, 0, 1, 1024)\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = (num, m, size)\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n\n elif block == 1:\n if m == 1:\n response.code = defines.responses[\"CONTINUE\"]\n response.block1 = (num, m, size)\n return response\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(getal1 * getal2 + getal3)
print(getal1 * (getal2 + getal3))
print(getal2 + getal3 / getal1)
print((getal2 + getal3) / getal1)
print(getal2 + getal3 % getal1)
print(abs(getal4 * getal1))
print(pow(getal3, getal5))
print(round(getal5 / getal2, 2))
print(max(getal1, getal2, getal3, getal4, getal5))
print(min(getal1, getal2, getal3, getal4, getal5))
print(math.sqrt(getal5 * getal3))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
getal1 = 5
getal2 = 7
getal3 = 8
getal4 = -4
getal5 = 2
print(getal1 * getal2 + getal3)
print(getal1 * (getal2 + getal3))
print(getal2 + getal3 / getal1)
print((getal2 + getal3) / getal1)
print(getal2 + getal3 % getal1)
print(abs(getal4 * getal1))
print(pow(getal3, getal5))
print(round(getal5 / getal2, 2))
print(max(getal1, getal2, getal3, getal4, getal5))
print(min(getal1, getal2, getal3, getal4, getal5))
print(math.sqrt(getal5 * getal3))
<|reserved_special_token_1|>
import math

# Sample operands used to demonstrate operator precedence and the
# built-in numeric helper functions.
getal1 = 5
getal2 = 7
getal3 = 8
getal4 = -4
getal5 = 2

# Evaluate each expression in order and print one result per line,
# exactly as the original exercise does.
uitkomsten = (
    getal1 * getal2 + getal3,        # * binds tighter than +
    getal1 * (getal2 + getal3),      # parentheses force the addition first
    getal2 + getal3 / getal1,        # / binds tighter than +
    (getal2 + getal3) / getal1,
    getal2 + getal3 % getal1,        # % binds tighter than +
    abs(getal4 * getal1),
    pow(getal3, getal5),
    round(getal5 / getal2, 2),
    max(getal1, getal2, getal3, getal4, getal5),
    min(getal1, getal2, getal3, getal4, getal5),
    math.sqrt(getal5 * getal3),
)
for uitkomst in uitkomsten:
    print(uitkomst)
|
flexible
|
{
"blob_id": "30d75aafd9612ac02557b947fc4e3c2f7322a7fd",
"index": 3555,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n",
"step-3": "<mask token>\ngetal1 = 5\ngetal2 = 7\ngetal3 = 8\ngetal4 = -4\ngetal5 = 2\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n",
"step-4": "import math\ngetal1 = 5\ngetal2 = 7\ngetal3 = 8\ngetal4 = -4\ngetal5 = 2\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/python3
# Perform a manual TCP three-way handshake (SYN -> SYN/ACK -> ACK) with scapy.
# Usage: <script> <source-ip> <dest-ip>
from scapy.all import *
import sys
# IP layer shared by every packet in the handshake; endpoints come from argv.
ip=IP(src=sys.argv[1], dst=sys.argv[2])
# SYN segment: fixed source port 52255 -> destination port 1237, initial
# sequence number 100, advertising MSS and window-scale TCP options.
syn_packet = TCP(sport=52255, dport=1237, flags="S", seq=100, options=[('MSS',689),('WScale',1)])
# sr1 sends the SYN and blocks until the first reply (the server's SYN/ACK).
synack_packet = sr1(ip/syn_packet)
# Acknowledge one past the server's sequence number, as TCP requires.
my_ack = synack_packet.seq+1
# Final ACK (our seq advances to 101 after consuming the SYN).
ack_packet = TCP(sport=52255, dport=1237, flags="A", seq=101, ack=my_ack)
send(ip/ack_packet)
|
normal
|
{
"blob_id": "acd6197e60cf59ffcaa33bb50a60a03592bb3559",
"index": 7169,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsend(ip / ack_packet)\n",
"step-3": "<mask token>\nip = IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[(\n 'MSS', 689), ('WScale', 1)])\nsynack_packet = sr1(ip / syn_packet)\nmy_ack = synack_packet.seq + 1\nack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack)\nsend(ip / ack_packet)\n",
"step-4": "from scapy.all import *\nimport sys\nip = IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[(\n 'MSS', 689), ('WScale', 1)])\nsynack_packet = sr1(ip / syn_packet)\nmy_ack = synack_packet.seq + 1\nack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack)\nsend(ip / ack_packet)\n",
"step-5": "#! /usr/bin/python3\n\nfrom scapy.all import *\nimport sys\n\nip=IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags=\"S\", seq=100, options=[('MSS',689),('WScale',1)])\nsynack_packet = sr1(ip/syn_packet)\nmy_ack = synack_packet.seq+1\nack_packet = TCP(sport=52255, dport=1237, flags=\"A\", seq=101, ack=my_ack)\nsend(ip/ack_packet)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def ex51():
with urllib.request.urlopen(TIME_URL) as response:
body = response.read()
parsed = json.loads(body)
date = datetime.fromisoformat(parsed['currentTime'])
stamp = date.strftime('%H:%M:%S %Z %B %m %d')
print('The current time is %s' % stamp)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ex51():
with urllib.request.urlopen(TIME_URL) as response:
body = response.read()
parsed = json.loads(body)
date = datetime.fromisoformat(parsed['currentTime'])
stamp = date.strftime('%H:%M:%S %Z %B %m %d')
print('The current time is %s' % stamp)
if __name__ == '__main__':
ex51()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TIME_URL = 'http://localhost:5000/'
def ex51():
with urllib.request.urlopen(TIME_URL) as response:
body = response.read()
parsed = json.loads(body)
date = datetime.fromisoformat(parsed['currentTime'])
stamp = date.strftime('%H:%M:%S %Z %B %m %d')
print('The current time is %s' % stamp)
if __name__ == '__main__':
ex51()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import urllib.request
import json
from datetime import datetime
TIME_URL = 'http://localhost:5000/'
def ex51():
with urllib.request.urlopen(TIME_URL) as response:
body = response.read()
parsed = json.loads(body)
date = datetime.fromisoformat(parsed['currentTime'])
stamp = date.strftime('%H:%M:%S %Z %B %m %d')
print('The current time is %s' % stamp)
if __name__ == '__main__':
ex51()
<|reserved_special_token_1|>
"""Time client"""
import urllib.request
import json
from datetime import datetime
# make sure that module51-server.py service is running
TIME_URL = "http://localhost:5000/"
def ex51():
with urllib.request.urlopen(TIME_URL) as response:
body = response.read()
parsed = json.loads(body)
date = datetime.fromisoformat(parsed["currentTime"])
stamp = date.strftime("%H:%M:%S %Z %B %m %d")
print("The current time is %s" % stamp)
if __name__ == "__main__":
ex51()
|
flexible
|
{
"blob_id": "e8f05a66c642ef3b570130a2996ca27efb8b0cb5",
"index": 5287,
"step-1": "<mask token>\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\nif __name__ == '__main__':\n ex51()\n",
"step-3": "<mask token>\nTIME_URL = 'http://localhost:5000/'\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\nif __name__ == '__main__':\n ex51()\n",
"step-4": "<mask token>\nimport urllib.request\nimport json\nfrom datetime import datetime\nTIME_URL = 'http://localhost:5000/'\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\nif __name__ == '__main__':\n ex51()\n",
"step-5": "\"\"\"Time client\"\"\"\n\nimport urllib.request\nimport json\nfrom datetime import datetime\n\n# make sure that module51-server.py service is running\nTIME_URL = \"http://localhost:5000/\"\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed[\"currentTime\"])\n stamp = date.strftime(\"%H:%M:%S %Z %B %m %d\")\n print(\"The current time is %s\" % stamp)\n\nif __name__ == \"__main__\":\n ex51()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Filesystem layout and Flask configuration for the hashcat-wpa server.
# NOTE: importing this module creates directories on disk (see "mkdirs" below).
import secrets
from pathlib import Path
# Per-user cache root; everything mutable at runtime lives under here.
HASHCAT_WPA_CACHE_DIR = Path.home() / ".hashcat" / "wpa-server"
# Repository root (two levels above this file); read-only resources live here.
ROOT_PRIVATE_DIR = Path(__file__).parent.parent
WORDLISTS_DIR = ROOT_PRIVATE_DIR / "wordlists"
WORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / "wordlists"  # user custom wordlists
RULES_DIR = ROOT_PRIVATE_DIR / "rules"
MASKS_DIR = ROOT_PRIVATE_DIR / "masks"
LOGS_DIR = ROOT_PRIVATE_DIR / "logs"
# SQLite database and bookkeeping files.
DATABASE_DIR = HASHCAT_WPA_CACHE_DIR / "database"
ESSID_TRIED = DATABASE_DIR / "essid_tried"
DATABASE_PATH = DATABASE_DIR / "hashcat_wpa.db"
# Hashcat
HASHCAT_STATUS_TIMER = 20  # seconds between hashcat status updates
BENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / "benchmark.csv"
HASHCAT_BRAIN_PASSWORD_PATH = HASHCAT_WPA_CACHE_DIR / "brain" / "hashcat_brain_password"
# mkdirs: ensure the writable directory tree exists at import time.
# The cache root is created first (parents=True); the children below it
# can then rely on their parent existing.
HASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)
WORDLISTS_USER_DIR.mkdir(exist_ok=True)
LOGS_DIR.mkdir(exist_ok=True)
DATABASE_DIR.mkdir(exist_ok=True)
HASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)
class Config:
    """ Flask application config """
    # Regenerated on every process start — presumably any signed sessions
    # are invalidated across restarts; confirm this is intended.
    SECRET_KEY = secrets.token_bytes(64)
    # Flask-SQLAlchemy settings
    SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(DATABASE_PATH)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Airodump capture files
    CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / "captures"
|
normal
|
{
"blob_id": "20d480517226cb7fbced765554a02fa5cbc29033",
"index": 6491,
"step-1": "<mask token>\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n",
"step-2": "<mask token>\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n",
"step-3": "<mask token>\nHASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server'\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists'\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists'\nRULES_DIR = ROOT_PRIVATE_DIR / 'rules'\nMASKS_DIR = ROOT_PRIVATE_DIR / 'masks'\nLOGS_DIR = ROOT_PRIVATE_DIR / 'logs'\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database'\nESSID_TRIED = DATABASE_DIR / 'essid_tried'\nDATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db'\nHASHCAT_STATUS_TIMER = 20\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv'\nHASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' /\n 'hashcat_brain_password')\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n",
"step-4": "import secrets\nfrom pathlib import Path\nHASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server'\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists'\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists'\nRULES_DIR = ROOT_PRIVATE_DIR / 'rules'\nMASKS_DIR = ROOT_PRIVATE_DIR / 'masks'\nLOGS_DIR = ROOT_PRIVATE_DIR / 'logs'\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database'\nESSID_TRIED = DATABASE_DIR / 'essid_tried'\nDATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db'\nHASHCAT_STATUS_TIMER = 20\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv'\nHASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' /\n 'hashcat_brain_password')\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n",
"step-5": "import secrets\nfrom pathlib import Path\n\nHASHCAT_WPA_CACHE_DIR = Path.home() / \".hashcat\" / \"wpa-server\"\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\n\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / \"wordlists\"\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / \"wordlists\" # user custom wordlists\nRULES_DIR = ROOT_PRIVATE_DIR / \"rules\"\nMASKS_DIR = ROOT_PRIVATE_DIR / \"masks\"\nLOGS_DIR = ROOT_PRIVATE_DIR / \"logs\"\n\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / \"database\"\nESSID_TRIED = DATABASE_DIR / \"essid_tried\"\nDATABASE_PATH = DATABASE_DIR / \"hashcat_wpa.db\"\n\n# Hashcat\nHASHCAT_STATUS_TIMER = 20 # seconds\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / \"benchmark.csv\"\nHASHCAT_BRAIN_PASSWORD_PATH = HASHCAT_WPA_CACHE_DIR / \"brain\" / \"hashcat_brain_password\"\n\n# mkdirs\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n\n SECRET_KEY = secrets.token_bytes(64)\n\n # Flask-SQLAlchemy settings\n SQLALCHEMY_DATABASE_URI = \"sqlite:///{}\".format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n # Airodump capture files\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / \"captures\"\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get():
market = 'Premium'
url = 'https://coinpremiums.herokuapp.com/json'
try:
result = ''
premiums = requests.get(url).json()
for exchange, exchange_currencies in premiums['premium'].items():
result += '[[{} | '.format(exchange.title())
_sum = 0
_cnt = 0
for currency_name, currency in exchange_currencies.items():
premium = currency['raw'] - 1
result += '[{}] {:.2%} '.format(currency_name.upper(), premium)
_cnt += 1
_sum += premium
result += '[평균] {:.2%} ]] '.format(_sum / _cnt)
except Exception as e:
result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.
__repr__())
return result
<|reserved_special_token_1|>
import requests
import json
def get():
market = 'Premium'
url = 'https://coinpremiums.herokuapp.com/json'
try:
result = ''
premiums = requests.get(url).json()
for exchange, exchange_currencies in premiums['premium'].items():
result += '[[{} | '.format(exchange.title())
_sum = 0
_cnt = 0
for currency_name, currency in exchange_currencies.items():
premium = currency['raw'] - 1
result += '[{}] {:.2%} '.format(currency_name.upper(), premium)
_cnt += 1
_sum += premium
result += '[평균] {:.2%} ]] '.format(_sum / _cnt)
except Exception as e:
result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.
__repr__())
return result
<|reserved_special_token_1|>
import requests
import json


def get():
    """Fetch crypto "kimchi premium" data and render a one-line summary.

    Queries the coinpremiums JSON endpoint and formats, per exchange, each
    currency's premium and the exchange average, e.g.
    ``[[Exchange | [BTC] 1.23% ... [평균] 1.50% ]] ``.

    Returns:
        str: the formatted summary, or an error message of the form
        ``[Premium] 에러! : <repr of exception>`` if anything fails.
    """
    market = 'Premium'
    url = 'https://coinpremiums.herokuapp.com/json'
    try:
        result = ''
        premiums = requests.get(url).json()
        for exchange, exchange_currencies in premiums['premium'].items():
            result += '[[{} | '.format(exchange.title())
            total = 0
            count = 0
            for currency_name, currency in exchange_currencies.items():
                # 'raw' is a price ratio; subtract 1 to get the premium fraction.
                premium = currency['raw'] - 1
                result += '[{}] {:.2%} '.format(currency_name.upper(), premium)
                count += 1
                total += premium
            result += '[평균] {:.2%} ]] '.format(total / count)
    except Exception as e:
        # Broad catch is deliberate: callers always get a message, never a raise
        # (covers network errors, bad JSON, missing keys, empty exchanges).
        result = '[{market}] 에러! : {msg}'.format(market=market, msg=repr(e))
    return result
|
flexible
|
{
"blob_id": "b5581be044013df9ff812f285f99ca67c4f96a62",
"index": 2927,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n try:\n result = ''\n premiums = requests.get(url).json()\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.\n __repr__())\n return result\n",
"step-3": "import requests\nimport json\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n try:\n result = ''\n premiums = requests.get(url).json()\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.\n __repr__())\n return result\n",
"step-4": "import requests\nimport json\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n\n try:\n result = \"\"\n premiums = requests.get(url).json()\n\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.__repr__())\n\n return result\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# =============>This is a Normal mathematical tasks<==========
x = 7
x = 7 // 3  # floor division = 2, class int
#x = 7 / 3 # true division gives a floating number = 2.3333333333333335, class float
#x = 7 % 3 # gives the remainder = 1, class int
#print("x is {}" .format(x))
#print(type(x))
# ================>This is how to add decimal accuracy vs precision<================
# x = .1 + .1 + .1 - .3 prints 5.551115123125783e-17 (not exactly 0!) because
# binary floats cannot represent 0.1 exactly; for exact arithmetic use the
# decimal module.
from decimal import *
x = .1 + .1 + .1 -.3
print("x is {}" .format(x))
print(type(x))
# =============>How to solve the above problem: accuracy<===============
# Decimal('.10') + Decimal('.10') + Decimal('.10') - Decimal('.30') is exactly 0.00,
# and the type is class decimal.Decimal.
# When dealing with money use this method.
from decimal import *
a = Decimal('.10')  # it will convert from string, preserving the exact value
b = Decimal('.30')
x = a + a + a - b
print("x is {}" .format(x))
print(type(x))
|
normal
|
{
"blob_id": "62a7958ba5ebb6da866d6ef156e52136df22f235",
"index": 107,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('x is {}'.format(x))\nprint(type(x))\n<mask token>\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-3": "x = 7\nx = 7 // 3\n<mask token>\nx = 0.1 + 0.1 + 0.1 - 0.3\nprint('x is {}'.format(x))\nprint(type(x))\n<mask token>\na = Decimal('.10')\nb = Decimal('.30')\nx = a + a + a - b\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-4": "x = 7\nx = 7 // 3\nfrom decimal import *\nx = 0.1 + 0.1 + 0.1 - 0.3\nprint('x is {}'.format(x))\nprint(type(x))\nfrom decimal import *\na = Decimal('.10')\nb = Decimal('.30')\nx = a + a + a - b\nprint('x is {}'.format(x))\nprint(type(x))\n",
"step-5": "\n# =============>This is a Normal mathematical tasks<==========\nx = 7\nx = 7 // 3 # rounds the number = 2 ans class int\n#x = 7 / 3 # gives the floating number = 2.33333335 ans class float\n#x = 7 % 3 # gives the reminder = 1 ans class int\n\n#print(\"x is {}\" .format(x))\n#print(type(x))\n# ================>This is how to add decimal accuracy vs procession<================\n# x = .1 + .1 + .1 -.3 the answer is 5.551115123125783 because python doe not understand accuracy and precision to overcome do the import * from decimal\nfrom decimal import *\nx = .1 + .1 + .1 -.3\nprint(\"x is {}\" .format(x))\nprint(type(x))\n# =============>How to solve the above problem accuracy<===============\n# And the type is class decimal.Decimal\n# When dealing with money use this method\nfrom decimal import *\na = Decimal('.10') # it will conver from string\nb = Decimal('.30')\nx = a + a + a - b\nprint(\"x is {}\" .format(x))\nprint(type(x))\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.1.3 on 2020-06-05 23:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``ip_attribution`` CharField to the ``UserIP`` model."""

    # Must be applied after the migration that added UserIP.serial_number.
    dependencies = [
        ('index', '0005_userip_serial_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='userip',
            name='ip_attribution',
            # default='' backfills existing rows; max 8 characters.
            field=models.CharField(default='', max_length=8, verbose_name='ip地址'),
        ),
    ]
|
normal
|
{
"blob_id": "a90db2073d43d54cbcc04e3000e5d0f2a2da4a55",
"index": 5281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('index', '0005_userip_serial_number')]\n operations = [migrations.AddField(model_name='userip', name=\n 'ip_attribution', field=models.CharField(default='', max_length=8,\n verbose_name='ip地址'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('index', '0005_userip_serial_number')]\n operations = [migrations.AddField(model_name='userip', name=\n 'ip_attribution', field=models.CharField(default='', max_length=8,\n verbose_name='ip地址'))]\n",
"step-5": "# Generated by Django 2.1.3 on 2020-06-05 23:06\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('index', '0005_userip_serial_number'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userip',\n name='ip_attribution',\n field=models.CharField(default='', max_length=8, verbose_name='ip地址'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fence_decipher(m: str, key: int) ->str:
chunklens = [(0) for _ in range(key)]
nfence = 0
dx = 1
for i in m:
chunklens[nfence] += 1
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
print(chunklens)
chunks = []
x = 0
for chunklen in chunklens:
chunks.append(list(m[x:x + chunklen]))
x += chunklen
nfence = 0
dx = 1
ans = []
for _ in m:
ans.append(chunks[nfence].pop(0))
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
return ''.join(ans)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fence_decipher(m: str, key: int) ->str:
chunklens = [(0) for _ in range(key)]
nfence = 0
dx = 1
for i in m:
chunklens[nfence] += 1
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
print(chunklens)
chunks = []
x = 0
for chunklen in chunklens:
chunks.append(list(m[x:x + chunklen]))
x += chunklen
nfence = 0
dx = 1
ans = []
for _ in m:
ans.append(chunks[nfence].pop(0))
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
return ''.join(ans)
if __name__ == '__main__':
print(fence_decipher(s, 4))
<|reserved_special_token_1|>
s = """Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски
аpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё
он ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон
ыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га
ынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »
иусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе
еоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы
т аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. оевлявеб отеь Нтр и ею н он гдп еоа
мж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне
оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч
ртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер
й к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4"""
def fence_decipher(m: str, key: int) ->str:
chunklens = [(0) for _ in range(key)]
nfence = 0
dx = 1
for i in m:
chunklens[nfence] += 1
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
print(chunklens)
chunks = []
x = 0
for chunklen in chunklens:
chunks.append(list(m[x:x + chunklen]))
x += chunklen
nfence = 0
dx = 1
ans = []
for _ in m:
ans.append(chunks[nfence].pop(0))
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
return ''.join(ans)
if __name__ == '__main__':
print(fence_decipher(s, 4))
<|reserved_special_token_1|>
#!/usr/bin/env python
s = '''Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски
аpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё
он ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон
ыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га
ынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »
иусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе
еоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы
т аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. оевлявеб отеь Нтр и ею н он гдп еоа
мж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне
оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч
ртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер
й к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4'''
def fence_decipher(m: str, key: int) -> str:
    """Decrypt a rail-fence (zigzag) cipher.

    The ciphertext is assumed to be the rails concatenated top to bottom.
    This reconstructs the zigzag write order and reads the characters back.

    Args:
        m: ciphertext string.
        key: number of rails (fences); values <= 1 leave the text unchanged.

    Returns:
        The decrypted plaintext.
    """
    # With a single rail the cipher is the identity (also avoids the
    # zigzag bouncing logic, which needs at least two rails).
    if key <= 1:
        return m
    # Pass 1: walk the zigzag to count how many characters land on each rail.
    chunklens = [0] * key
    rail = 0
    step = 1
    for _ in m:
        chunklens[rail] += 1
        rail += step
        if step == 1 and rail == key - 1:
            step = -1
        elif step == -1 and rail == 0:
            step = 1
    # Slice the ciphertext into one contiguous chunk per rail.
    chunks = []
    pos = 0
    for length in chunklens:
        chunks.append(m[pos:pos + length])
        pos += length
    # Pass 2: walk the same zigzag again, consuming each rail left to right
    # via a per-rail cursor (O(1) per character, unlike list.pop(0)).
    cursors = [0] * key
    rail = 0
    step = 1
    out = []
    for _ in m:
        out.append(chunks[rail][cursors[rail]])
        cursors[rail] += 1
        rail += step
        if step == 1 and rail == key - 1:
            step = -1
        elif step == -1 and rail == 0:
            step = 1
    return ''.join(out)
if __name__ == '__main__':
    # Decrypt the bundled ciphertext `s` (defined above) using 4 rails.
    print(fence_decipher(s, 4))
|
flexible
|
{
"blob_id": "a8bed0b5a6a95d67b5602b395f1d0ea12cd53fb0",
"index": 9166,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-4": "s = \"\"\"Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски\nаpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё\nон ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон\nыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га\nынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »\nиусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе\nеоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы\nт аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе 
веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. оевлявеб отеь Нтр и ею н он гдп еоа\nмж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне\n оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч\nртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер\nй к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4\"\"\"\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx 
= 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-5": "#!/usr/bin/env python\ns = '''Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски\nаpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё\nон ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон\nыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га\nынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »\nиусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе\nеоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы\nт аи ечьзнян нвс в л.оы оиьаойиеск 
здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. оевлявеб отеь Нтр и ею н он гдп еоа\nмж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне\n оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч\nртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер\nй к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4'''\n\n\ndef fence_decipher(m: str, key: int) -> str:\n chunklens = [0 for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and 
nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from bs4 import BeautifulSoup
from aiounfurl.parsers import oembed
def test_oembed_not_match(oembed_providers):
    """A URL matched by no registered provider yields no oEmbed endpoint."""
    extractor = oembed.OEmbedURLExtractor(oembed_providers)
    assert extractor.get_oembed_url('http://test.com') is None
def test_oembed_founded(oembed_providers):
    """A known provider URL (Instagram post) resolves to an endpoint string."""
    extractor = oembed.OEmbedURLExtractor(oembed_providers)
    result = extractor.get_oembed_url('https://www.instagram.com/p/BNHh2YJDdcY/')
    assert isinstance(result, str)
def test_oembed_discovery(oembed_providers, files_dir):
    """An oEmbed endpoint advertised via discovery markup is found in raw HTML."""
    oembed_html = (files_dir / 'oembed_json.html').read_text()
    # Name the parser explicitly: without it BeautifulSoup guesses one, emits
    # GuessedAtParserWarning, and the parse result can vary by environment
    # depending on which parser libraries happen to be installed.
    soup = BeautifulSoup(oembed_html, 'html.parser')
    oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)
    oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)
    assert isinstance(oembed_url, str)
def test_oembed_params(oembed_providers):
    """Extra query parameters are appended to the generated oEmbed URL."""
    extractor = oembed.OEmbedURLExtractor(
        oembed_providers, params={'maxwidth': 200})
    result = extractor.get_oembed_url('https://www.instagram.com/p/BNHh2YJDdcY/')
    assert isinstance(result, str)
    assert 'maxwidth=200' in result
|
normal
|
{
"blob_id": "7b2ad0b4eca7b31b314e32ad57d51be82f0eaf61",
"index": 6979,
"step-1": "<mask token>\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_params(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,\n params={'maxwidth': 200})\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n assert 'maxwidth=200' in oembed_url\n",
"step-4": "from bs4 import BeautifulSoup\nfrom aiounfurl.parsers import oembed\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_params(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,\n params={'maxwidth': 200})\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n assert 'maxwidth=200' in oembed_url\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User, Group
# Create your models here.
def default_expiration():
    """Default survey expiry: ten days after the moment of creation."""
    ten_days = timezone.timedelta(days=10)
    return timezone.now() + ten_days
class Category(models.Model):
    """A label used to group related surveys."""
    # Short display name of the category.
    name = models.CharField(max_length=200)
    # Free-form explanation of what the category covers.
    description = models.TextField()
    def __str__(self):
        return self.name
class Survey(models.Model):
    """A questionnaire that can be assigned to individual users and to groups."""
    name = models.CharField(max_length=200)
    description = models.TextField()
    # Optional grouping; deleting the Category also deletes its surveys (CASCADE).
    category = models.ForeignKey(Category, blank=True, null=True, on_delete=models.CASCADE)
    # Assignments go through explicit join models that carry completion state.
    users = models.ManyToManyField(User, through='SurveyToUser')
    groups = models.ManyToManyField(Group, through='SurveyToGroup')
    # Published "now" by default; expires ten days later (see default_expiration).
    pub_date = models.DateTimeField(default=timezone.now)
    exp_date = models.DateTimeField(default=default_expiration)
    # NOTE(review): the meaning of `period` is not visible in this file —
    # presumably a repeat/recurrence interval; confirm against calling code.
    period = models.DurationField(default=None, null=True, blank=True)
    @property
    def get_questions(self):
        # Reverse FK accessor: every Question row pointing at this survey.
        return self.question_set.all()
    def __str__(self):
        return f'{self.name}: {self.description}'
class Question(models.Model):
    """A single question belonging to a Survey.

    `question_type` selects the widget semantics (free text, number,
    single/multiple select, radio, checkbox).
    """
    TEXT = 'text'
    NUMBER = 'number'
    SELECT = 'select'
    SELECT_MULTIPLE = 'select-multiple'
    RADIO = 'radio'
    CHECKBOX = 'checkbox'
    SURVEY_QUESTION_TYPES_CHOICES = [
        (TEXT, 'text'),
        (NUMBER, 'number'),
        (SELECT, 'select'),
        (SELECT_MULTIPLE, 'select-multiple'),
        (RADIO, 'radio'),
        (CHECKBOX, 'checkbox'),
    ]
    # The question prompt shown to the respondent.
    text = models.TextField()
    required = models.BooleanField(default=True)
    # Deleting the Survey deletes its questions.
    survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=models.CASCADE)
    question_type = models.CharField(
        max_length=50,
        choices=SURVEY_QUESTION_TYPES_CHOICES,
        default=TEXT,
    )
    # NOTE(review): presumably the serialized option list for select/radio/
    # checkbox types — the storage format is not visible here; confirm.
    choices = models.TextField(blank=True, null=True)
    # NOTE(review): purpose of `other` is not visible here — possibly an
    # "other, please specify" option; confirm against the form layer.
    other = models.TextField(blank=True, null=True)
    def __str__(self):
        return f'Question: {self.text} of survey {self.survey.name}'
class Answer(models.Model):
    """One user's response to one Question."""
    # Response payload; may be empty for optional questions.
    text = models.TextField(blank=True)
    # Both FKs cascade: deleting the question or the user removes the answer.
    question = models.ForeignKey(Question, blank=False, null=False, on_delete=models.CASCADE)
    user = models.ForeignKey(User, blank=False, null=False, on_delete=models.CASCADE)
    def __str__(self):
        return f'Answer: {self.text} to the Question {self.question.text} ' \
               f'given by User: {self.user.username} for survey {self.question.survey.name}'
class SurveyToUser(models.Model):
    """Through-model for Survey.users; tracks per-user completion state."""
    user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)
    survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=models.CASCADE, related_name='user_survey')
    # Flipped once the user finishes the survey; timestamp recorded alongside.
    completed = models.BooleanField(default=False, null=False)
    completion_date = models.DateTimeField(default=None, null=True, blank=True)
class SurveyToGroup(models.Model):
    """Through-model for Survey.groups; tracks per-group completion state."""
    group = models.ForeignKey(Group, blank=True, null=True, on_delete=models.CASCADE)
    survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=models.CASCADE)
    # Flipped once the group's survey is considered complete.
    completed = models.BooleanField(default=False, null=False)
    completion_date = models.DateTimeField(default=None, null=True, blank=True)
class StarRating(models.Model):
    """A free-text rating record tied to a user."""
    # NOTE(review): null=False combined with default=None means saving without
    # an explicit value will hit a database IntegrityError — confirm intended.
    text = models.TextField(blank=True, null=False, default=None)
    user = models.ForeignKey(User, blank=False, null=False, on_delete=models.CASCADE)
    # When the rating was completed; None while still pending.
    completed = models.DateTimeField(blank=True, null=True, default=None)
|
normal
|
{
"blob_id": "33b6a4c76079ed698809b29772abb59a34831472",
"index": 5900,
"step-1": "<mask token>\n\n\nclass Survey(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n category = models.ForeignKey(Category, blank=True, null=True, on_delete\n =models.CASCADE)\n users = models.ManyToManyField(User, through='SurveyToUser')\n groups = models.ManyToManyField(Group, through='SurveyToGroup')\n pub_date = models.DateTimeField(default=timezone.now)\n exp_date = models.DateTimeField(default=default_expiration)\n period = models.DurationField(default=None, null=True, blank=True)\n\n @property\n def get_questions(self):\n return self.question_set.all()\n\n def __str__(self):\n return f'{self.name}: {self.description}'\n\n\nclass Question(models.Model):\n TEXT = 'text'\n NUMBER = 'number'\n SELECT = 'select'\n SELECT_MULTIPLE = 'select-multiple'\n RADIO = 'radio'\n CHECKBOX = 'checkbox'\n SURVEY_QUESTION_TYPES_CHOICES = [(TEXT, 'text'), (NUMBER, 'number'), (\n SELECT, 'select'), (SELECT_MULTIPLE, 'select-multiple'), (RADIO,\n 'radio'), (CHECKBOX, 'checkbox')]\n text = models.TextField()\n required = models.BooleanField(default=True)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n question_type = models.CharField(max_length=50, choices=\n SURVEY_QUESTION_TYPES_CHOICES, default=TEXT)\n choices = models.TextField(blank=True, null=True)\n other = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f'Question: {self.text} of survey {self.survey.name}'\n\n\nclass Answer(models.Model):\n text = models.TextField(blank=True)\n question = models.ForeignKey(Question, blank=False, null=False,\n on_delete=models.CASCADE)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'Answer: {self.text} to the Question {self.question.text} given by User: {self.user.username} for survey {self.question.survey.name}'\n )\n\n\nclass SurveyToUser(models.Model):\n user = models.ForeignKey(User, 
blank=True, null=True, on_delete=models.\n CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE, related_name='user_survey')\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass SurveyToGroup(models.Model):\n group = models.ForeignKey(Group, blank=True, null=True, on_delete=\n models.CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass StarRating(models.Model):\n text = models.TextField(blank=True, null=False, default=None)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.DateTimeField(blank=True, null=True, default=None)\n",
"step-2": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Survey(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n category = models.ForeignKey(Category, blank=True, null=True, on_delete\n =models.CASCADE)\n users = models.ManyToManyField(User, through='SurveyToUser')\n groups = models.ManyToManyField(Group, through='SurveyToGroup')\n pub_date = models.DateTimeField(default=timezone.now)\n exp_date = models.DateTimeField(default=default_expiration)\n period = models.DurationField(default=None, null=True, blank=True)\n\n @property\n def get_questions(self):\n return self.question_set.all()\n\n def __str__(self):\n return f'{self.name}: {self.description}'\n\n\nclass Question(models.Model):\n TEXT = 'text'\n NUMBER = 'number'\n SELECT = 'select'\n SELECT_MULTIPLE = 'select-multiple'\n RADIO = 'radio'\n CHECKBOX = 'checkbox'\n SURVEY_QUESTION_TYPES_CHOICES = [(TEXT, 'text'), (NUMBER, 'number'), (\n SELECT, 'select'), (SELECT_MULTIPLE, 'select-multiple'), (RADIO,\n 'radio'), (CHECKBOX, 'checkbox')]\n text = models.TextField()\n required = models.BooleanField(default=True)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n question_type = models.CharField(max_length=50, choices=\n SURVEY_QUESTION_TYPES_CHOICES, default=TEXT)\n choices = models.TextField(blank=True, null=True)\n other = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f'Question: {self.text} of survey {self.survey.name}'\n\n\nclass Answer(models.Model):\n text = models.TextField(blank=True)\n question = models.ForeignKey(Question, blank=False, null=False,\n on_delete=models.CASCADE)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'Answer: {self.text} to the Question {self.question.text} given by User: {self.user.username} for survey {self.question.survey.name}'\n 
)\n\n\nclass SurveyToUser(models.Model):\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.\n CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE, related_name='user_survey')\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass SurveyToGroup(models.Model):\n group = models.ForeignKey(Group, blank=True, null=True, on_delete=\n models.CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass StarRating(models.Model):\n text = models.TextField(blank=True, null=False, default=None)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.DateTimeField(blank=True, null=True, default=None)\n",
"step-3": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\nclass Survey(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n category = models.ForeignKey(Category, blank=True, null=True, on_delete\n =models.CASCADE)\n users = models.ManyToManyField(User, through='SurveyToUser')\n groups = models.ManyToManyField(Group, through='SurveyToGroup')\n pub_date = models.DateTimeField(default=timezone.now)\n exp_date = models.DateTimeField(default=default_expiration)\n period = models.DurationField(default=None, null=True, blank=True)\n\n @property\n def get_questions(self):\n return self.question_set.all()\n\n def __str__(self):\n return f'{self.name}: {self.description}'\n\n\nclass Question(models.Model):\n TEXT = 'text'\n NUMBER = 'number'\n SELECT = 'select'\n SELECT_MULTIPLE = 'select-multiple'\n RADIO = 'radio'\n CHECKBOX = 'checkbox'\n SURVEY_QUESTION_TYPES_CHOICES = [(TEXT, 'text'), (NUMBER, 'number'), (\n SELECT, 'select'), (SELECT_MULTIPLE, 'select-multiple'), (RADIO,\n 'radio'), (CHECKBOX, 'checkbox')]\n text = models.TextField()\n required = models.BooleanField(default=True)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n question_type = models.CharField(max_length=50, choices=\n SURVEY_QUESTION_TYPES_CHOICES, default=TEXT)\n choices = models.TextField(blank=True, null=True)\n other = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f'Question: {self.text} of survey {self.survey.name}'\n\n\nclass Answer(models.Model):\n text = models.TextField(blank=True)\n question = models.ForeignKey(Question, blank=False, null=False,\n on_delete=models.CASCADE)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'Answer: {self.text} to the Question {self.question.text} given by User: {self.user.username} for survey 
{self.question.survey.name}'\n )\n\n\nclass SurveyToUser(models.Model):\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.\n CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE, related_name='user_survey')\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass SurveyToGroup(models.Model):\n group = models.ForeignKey(Group, blank=True, null=True, on_delete=\n models.CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass StarRating(models.Model):\n text = models.TextField(blank=True, null=False, default=None)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.DateTimeField(blank=True, null=True, default=None)\n",
"step-4": "<mask token>\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n\n def __str__(self):\n return self.name\n\n\nclass Survey(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n category = models.ForeignKey(Category, blank=True, null=True, on_delete\n =models.CASCADE)\n users = models.ManyToManyField(User, through='SurveyToUser')\n groups = models.ManyToManyField(Group, through='SurveyToGroup')\n pub_date = models.DateTimeField(default=timezone.now)\n exp_date = models.DateTimeField(default=default_expiration)\n period = models.DurationField(default=None, null=True, blank=True)\n\n @property\n def get_questions(self):\n return self.question_set.all()\n\n def __str__(self):\n return f'{self.name}: {self.description}'\n\n\nclass Question(models.Model):\n TEXT = 'text'\n NUMBER = 'number'\n SELECT = 'select'\n SELECT_MULTIPLE = 'select-multiple'\n RADIO = 'radio'\n CHECKBOX = 'checkbox'\n SURVEY_QUESTION_TYPES_CHOICES = [(TEXT, 'text'), (NUMBER, 'number'), (\n SELECT, 'select'), (SELECT_MULTIPLE, 'select-multiple'), (RADIO,\n 'radio'), (CHECKBOX, 'checkbox')]\n text = models.TextField()\n required = models.BooleanField(default=True)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n question_type = models.CharField(max_length=50, choices=\n SURVEY_QUESTION_TYPES_CHOICES, default=TEXT)\n choices = models.TextField(blank=True, null=True)\n other = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f'Question: {self.text} of survey {self.survey.name}'\n\n\nclass Answer(models.Model):\n text = models.TextField(blank=True)\n question = models.ForeignKey(Question, blank=False, null=False,\n on_delete=models.CASCADE)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n\n def __str__(self):\n return (\n f'Answer: {self.text} to the Question {self.question.text} given 
by User: {self.user.username} for survey {self.question.survey.name}'\n )\n\n\nclass SurveyToUser(models.Model):\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.\n CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE, related_name='user_survey')\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass SurveyToGroup(models.Model):\n group = models.ForeignKey(Group, blank=True, null=True, on_delete=\n models.CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass StarRating(models.Model):\n text = models.TextField(blank=True, null=False, default=None)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=\n models.CASCADE)\n completed = models.DateTimeField(blank=True, null=True, default=None)\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User, Group\n\n# Create your models here.\n\n\ndef default_expiration():\n return timezone.now() + timezone.timedelta(days=10)\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n\n def __str__(self):\n return self.name\n\n\nclass Survey(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n category = models.ForeignKey(Category, blank=True, null=True, on_delete=models.CASCADE)\n users = models.ManyToManyField(User, through='SurveyToUser')\n groups = models.ManyToManyField(Group, through='SurveyToGroup')\n pub_date = models.DateTimeField(default=timezone.now)\n exp_date = models.DateTimeField(default=default_expiration)\n period = models.DurationField(default=None, null=True, blank=True)\n\n @property\n def get_questions(self):\n return self.question_set.all()\n\n def __str__(self):\n return f'{self.name}: {self.description}'\n\n\nclass Question(models.Model):\n TEXT = 'text'\n NUMBER = 'number'\n SELECT = 'select'\n SELECT_MULTIPLE = 'select-multiple'\n RADIO = 'radio'\n CHECKBOX = 'checkbox'\n\n SURVEY_QUESTION_TYPES_CHOICES = [\n (TEXT, 'text'),\n (NUMBER, 'number'),\n (SELECT, 'select'),\n (SELECT_MULTIPLE, 'select-multiple'),\n (RADIO, 'radio'),\n (CHECKBOX, 'checkbox'),\n ]\n\n text = models.TextField()\n required = models.BooleanField(default=True)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=models.CASCADE)\n question_type = models.CharField(\n max_length=50,\n choices=SURVEY_QUESTION_TYPES_CHOICES,\n default=TEXT,\n )\n choices = models.TextField(blank=True, null=True)\n other = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f'Question: {self.text} of survey {self.survey.name}'\n\n\nclass Answer(models.Model):\n text = models.TextField(blank=True)\n question = models.ForeignKey(Question, 
blank=False, null=False, on_delete=models.CASCADE)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=models.CASCADE)\n\n def __str__(self):\n return f'Answer: {self.text} to the Question {self.question.text} ' \\\n f'given by User: {self.user.username} for survey {self.question.survey.name}'\n\n\nclass SurveyToUser(models.Model):\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=models.CASCADE, related_name='user_survey')\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass SurveyToGroup(models.Model):\n group = models.ForeignKey(Group, blank=True, null=True, on_delete=models.CASCADE)\n survey = models.ForeignKey(Survey, blank=False, null=False, on_delete=models.CASCADE)\n completed = models.BooleanField(default=False, null=False)\n completion_date = models.DateTimeField(default=None, null=True, blank=True)\n\n\nclass StarRating(models.Model):\n text = models.TextField(blank=True, null=False, default=None)\n user = models.ForeignKey(User, blank=False, null=False, on_delete=models.CASCADE)\n completed = models.DateTimeField(blank=True, null=True, default=None)\n\n",
"step-ids": [
16,
17,
18,
19,
22
]
}
|
[
16,
17,
18,
19,
22
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('event', '0009_auto_20211001_0406')]
operations = [migrations.AlterField(model_name='event', name='map',
field=django_resized.forms.ResizedImageField(blank=True, crop=None,
force_format='JPEG', help_text='Mapa del evento', keep_meta=True,
null=True, quality=90, size=[1920, 1080], upload_to=event.models.
event.event_pictures, verbose_name='Mapa')), migrations.AlterField(
model_name='eventagenda', name='map', field=django_resized.forms.
ResizedImageField(blank=True, crop=None, force_format='JPEG',
help_text='Mapa de la exposicion', keep_meta=True, null=True,
quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.
event_pictures, verbose_name='Mapa'))]
<|reserved_special_token_1|>
from django.db import migrations
import django_resized.forms
import event.models.event
import event.models.event_agenda
class Migration(migrations.Migration):
dependencies = [('event', '0009_auto_20211001_0406')]
operations = [migrations.AlterField(model_name='event', name='map',
field=django_resized.forms.ResizedImageField(blank=True, crop=None,
force_format='JPEG', help_text='Mapa del evento', keep_meta=True,
null=True, quality=90, size=[1920, 1080], upload_to=event.models.
event.event_pictures, verbose_name='Mapa')), migrations.AlterField(
model_name='eventagenda', name='map', field=django_resized.forms.
ResizedImageField(blank=True, crop=None, force_format='JPEG',
help_text='Mapa de la exposicion', keep_meta=True, null=True,
quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.
event_pictures, verbose_name='Mapa'))]
<|reserved_special_token_1|>
# Generated by Django 3.2.7 on 2021-10-01 06:43
from django.db import migrations
import django_resized.forms
import event.models.event
import event.models.event_agenda
class Migration(migrations.Migration):
    """Auto-generated migration: redeclares the `map` image fields on Event
    and EventAgenda as ResizedImageField (JPEG, max 1920x1080, quality 90).
    Generated code — do not hand-edit the operations."""
    dependencies = [
        ('event', '0009_auto_20211001_0406'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='map',
            field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa del evento', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event.event_pictures, verbose_name='Mapa'),
        ),
        migrations.AlterField(
            model_name='eventagenda',
            name='map',
            field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa de la exposicion', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.event_pictures, verbose_name='Mapa'),
        ),
    ]
|
flexible
|
{
"blob_id": "d0a053faccecddc84a9556aec3dff691b171df96",
"index": 9977,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('event', '0009_auto_20211001_0406')]\n operations = [migrations.AlterField(model_name='event', name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None,\n force_format='JPEG', help_text='Mapa del evento', keep_meta=True,\n null=True, quality=90, size=[1920, 1080], upload_to=event.models.\n event.event_pictures, verbose_name='Mapa')), migrations.AlterField(\n model_name='eventagenda', name='map', field=django_resized.forms.\n ResizedImageField(blank=True, crop=None, force_format='JPEG',\n help_text='Mapa de la exposicion', keep_meta=True, null=True,\n quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.\n event_pictures, verbose_name='Mapa'))]\n",
"step-4": "from django.db import migrations\nimport django_resized.forms\nimport event.models.event\nimport event.models.event_agenda\n\n\nclass Migration(migrations.Migration):\n dependencies = [('event', '0009_auto_20211001_0406')]\n operations = [migrations.AlterField(model_name='event', name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None,\n force_format='JPEG', help_text='Mapa del evento', keep_meta=True,\n null=True, quality=90, size=[1920, 1080], upload_to=event.models.\n event.event_pictures, verbose_name='Mapa')), migrations.AlterField(\n model_name='eventagenda', name='map', field=django_resized.forms.\n ResizedImageField(blank=True, crop=None, force_format='JPEG',\n help_text='Mapa de la exposicion', keep_meta=True, null=True,\n quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.\n event_pictures, verbose_name='Mapa'))]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-10-01 06:43\n\nfrom django.db import migrations\nimport django_resized.forms\nimport event.models.event\nimport event.models.event_agenda\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('event', '0009_auto_20211001_0406'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa del evento', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event.event_pictures, verbose_name='Mapa'),\n ),\n migrations.AlterField(\n model_name='eventagenda',\n name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa de la exposicion', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.event_pictures, verbose_name='Mapa'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Project Euler
Problem #41 - Pandigital prime
David 07/06/2017
'''
import time
import math
# Running maximum found so far; 2 (the smallest prime) is a safe lower bound.
maxPandigitalPrime = 2
def isPrime(num):
    """Return True iff ``num`` is prime, by trial division up to sqrt(num).

    Fast paths: anything <= 1 is not prime, 2 is prime, other evens are not.
    """
    if num <= 1:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # math.isqrt gives the exact integer square root; the original
    # int(math.sqrt(num)) relies on float rounding, which can yield a wrong
    # bound for very large integers.
    return all(num % i for i in range(3, math.isqrt(num) + 1, 2))
def permutate(arr, n):
    """Recursively enumerate all permutations of arr[n:] in place.

    At each complete permutation, interpret the elements as decimal digits,
    and if the resulting number is a prime larger than the current global
    maxPandigitalPrime, record it there.
    """
    if n == len(arr):
        candidate = int(''.join(str(digit) for digit in arr))
        if isPrime(candidate):
            global maxPandigitalPrime
            if candidate > maxPandigitalPrime:
                maxPandigitalPrime = candidate
        return
    for i in range(n, len(arr)):
        # Put element i at the current head position ...
        arr[n], arr[i] = arr[i], arr[n]
        permutate(arr, n + 1)
        # ... then swap back to restore the array (backtracking).
        arr[n], arr[i] = arr[i], arr[n]
# main: search all pandigital numbers with 2..8 digits, timing the run.
tic = time.time()
for digit in range(2, 9):
    permutate(list(range(1, digit + 1)), 0)
print(maxPandigitalPrime)
toc = time.time()
print(toc - tic)
|
normal
|
{
"blob_id": "7ca7693b842700a7b15242b656648e8a7e58cd23",
"index": 1691,
"step-1": "<mask token>\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\n<mask token>\nfor digit in range(2, 9):\n arr = list(range(1, digit + 1))\n permutate(arr, 0)\nprint(maxPandigitalPrime)\n<mask token>\nprint(toc - tic)\n",
"step-3": "<mask token>\nmaxPandigitalPrime = 2\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\ntic = time.time()\nfor digit in range(2, 9):\n arr = list(range(1, digit + 1))\n permutate(arr, 0)\nprint(maxPandigitalPrime)\ntoc = time.time()\nprint(toc - tic)\n",
"step-4": "<mask token>\nimport time\nimport math\nmaxPandigitalPrime = 2\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\ntic = time.time()\nfor digit in range(2, 9):\n arr = list(range(1, digit + 1))\n permutate(arr, 0)\nprint(maxPandigitalPrime)\ntoc = time.time()\nprint(toc - tic)\n",
"step-5": "'''\nProject Euler\n\nProblem #41 - Pandigital prime\n\nDavid 07/06/2017\n'''\n\nimport time\nimport math\n\nmaxPandigitalPrime = 2\n\ndef isPrime(num):\n if(num<=1):\n return False\n elif(num==2):\n return True\n elif(num%2==0):\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num)+1\n for i in range(3,bound,2):\n if(num%i==0):\n return False\n return True\n\n\ndef permutate(arr,n):\n if(n==len(arr)):\n #print(arr)\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if(isPrime(num)):\n global maxPandigitalPrime\n if(num>maxPandigitalPrime):\n maxPandigitalPrime = num\n else:\n for i in range(n,len(arr)):\n # swap index n(head), i\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr,n+1)\n # swap back to resume arr\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\n# main\ntic = time.time()\nfor digit in range(2,9):\n arr = list(range(1,digit+1))\n permutate(arr,0)\n\nprint(maxPandigitalPrime)\ntoc = time.time()\nprint(toc-tic)\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Collect the squares of primes that land inside the target set of numbers.
primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
# Numbers in [8, 26] plus the single value 49 (range(49, 50) == [49]).
# NOTE: the previous comment claimed "(44 - 44)", contradicting the code.
# A set gives O(1) membership tests instead of scanning a list.
intervalo = set(range(8, 27)) | set(range(49, 50))
is_magic = []
for n in primos:
    quadrado = n ** 2
    if quadrado in intervalo:
        is_magic.append(quadrado)

print(len(is_magic))  # 3  (the squares 9, 25 and 49)
|
normal
|
{
"blob_id": "b7f443521e165f327aae9ff5d7bbb7b8462abeb5",
"index": 2890,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\nprint(len(is_magic))\n",
"step-3": "primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\nintervalo = list(range(8, 27)) + list(range(49, 50))\nis_magic = []\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\nprint(len(is_magic))\n",
"step-4": "primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n# números entre (8 - 26) e (44 - 44)\nintervalo = list(range(8, 27)) + list(range(49, 50))\nis_magic = []\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\n\nprint(len(is_magic)) # 3",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, logout, login
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views import View
class login_view(View):
    """Render the admin login form and authenticate posted credentials."""
    template_name = 'adminbiobses/login.html'

    def get(self, request):
        """Show the login form."""
        return render(request, self.template_name)

    def post(self, request):
        """Authenticate the submitted username/password.

        On success the user id, username and full name are stored in the
        session and the user is redirected to the dashboard; otherwise an
        explanatory flash message is added and the form is rendered again.
        """
        user = authenticate(username=request.POST['username'], password=request.POST['password'])
        if user is not None:
            if user.is_active:
                try:
                    login(request, user)
                    request.session['user_id'] = user.id
                    request.session['username'] = user.username
                    request.session['name'] = user.first_name+' '+user.last_name or ''
                except Exception:
                    # Bug fix: a bare ``except:`` previously swallowed every
                    # error (even system-exiting ones); narrowed to Exception
                    # so interrupts still propagate.
                    messages.add_message(request, messages.INFO, 'Anda belum terdaftar, silahkan hubungi administrator')
                return redirect('adminbiobses:index')
            else:
                messages.add_message(request, messages.INFO, 'user belum terverifikasi')
        else:
            messages.add_message(request, messages.INFO, 'user atau password anda salah')
        return render(request, self.template_name)
class logout_view(View):
    """End the current session and send the user back to the login page."""

    def get(self, request):
        """Log the user out, then redirect to the login view."""
        logout(request)
        target = redirect('adminbiobses:login')
        return target
@method_decorator(login_required, name='dispatch')
class index(View):
    """Admin dashboard landing page; requires an authenticated session."""

    template_name = 'adminbiobses/index.html'

    def get(self, request):
        """Render the dashboard template."""
        return render(request, self.template_name)
|
normal
|
{
"blob_id": "e4e2e8ca65d109805b267f148e8d255d81d4ee83",
"index": 1801,
"step-1": "<mask token>\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-2": "<mask token>\n\n\nclass login_view(View):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-3": "<mask token>\n\n\nclass login_view(View):\n <mask token>\n <mask token>\n\n def post(self, request):\n user = authenticate(username=request.POST['username'], password=\n request.POST['password'])\n if user is not None:\n if user.is_active:\n try:\n login(request, user)\n request.session['user_id'] = user.id\n request.session['username'] = user.username\n request.session['name'\n ] = user.first_name + ' ' + user.last_name or ''\n except:\n messages.add_message(request, messages.INFO,\n 'Anda belum terdaftar, silahkan hubungi administrator')\n return redirect('adminbiobses:index')\n else:\n messages.add_message(request, messages.INFO,\n 'user belum terverifikasi')\n else:\n messages.add_message(request, messages.INFO,\n 'user atau password anda salah')\n return render(request, self.template_name)\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-4": "<mask token>\n\n\nclass login_view(View):\n <mask token>\n\n def get(self, request):\n return render(request, self.template_name)\n\n def post(self, request):\n user = authenticate(username=request.POST['username'], password=\n request.POST['password'])\n if user is not None:\n if user.is_active:\n try:\n login(request, user)\n request.session['user_id'] = user.id\n request.session['username'] = user.username\n request.session['name'\n ] = user.first_name + ' ' + user.last_name or ''\n except:\n messages.add_message(request, messages.INFO,\n 'Anda belum terdaftar, silahkan hubungi administrator')\n return redirect('adminbiobses:index')\n else:\n messages.add_message(request, messages.INFO,\n 'user belum terverifikasi')\n else:\n messages.add_message(request, messages.INFO,\n 'user atau password anda salah')\n return render(request, self.template_name)\n\n\nclass logout_view(View):\n\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n\n def get(self, request):\n return render(request, self.template_name)\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, logout, login\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nclass login_view(View):\n template_name = 'adminbiobses/login.html'\n\n def get(self, request):\n return render(request, self.template_name)\n\n def post(self, request):\n user =authenticate(username=request.POST['username'],password=request.POST['password'])\n if user is not None :\n if user.is_active :\n try :\n login(request, user)\n request.session['user_id'] = user.id\n request.session['username'] = user.username\n request.session['name'] = user.first_name+' '+user.last_name or ''\n except :\n messages.add_message(request, messages.INFO, 'Anda belum terdaftar, silahkan hubungi administrator')\n return redirect('adminbiobses:index')\n else :\n messages.add_message(request, messages.INFO, 'user belum terverifikasi')\n else :\n messages.add_message(request, messages.INFO, 'user atau password anda salah')\n \n return render(request, self.template_name)\n\nclass logout_view(View):\n def get(self, request):\n logout(request)\n return redirect('adminbiobses:login')\n\n@method_decorator(login_required, name='dispatch')\nclass index(View):\n template_name = 'adminbiobses/index.html'\n def get(self, request):\n return render(request, self.template_name)\n ",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
from django.db.models import Q, Avg
from django.http import JsonResponse
from rest_framework import permissions
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from base_backend import permissions as my_perms
from base_backend.utils import RequestDataFixer
from restaurants.models import User, Cuisine, MealType, AppVersion, RestaurantType, Restaurant, Menu, Order, OrderLine, \
Wilaya, City, Address, Phone
from restaurants.serializers import UserSerializer, SmsConfirmationSerializer, CuisineSerializer, \
RestaurantTypeSerializer, RestaurantSerializer, MenuSerializer, OrderLineSerializer, WilayaSerializer, \
CitySerializer, OrderWRestaurantSerializer, MealTypesWithMenuSerializer, MealTypeSerializer, OrderSerializer, \
AddressSerializer, PhoneSerializer
class LoginApi(ObtainAuthToken):
    """Token login endpoint.

    Validates the posted credentials through the auth-token serializer,
    gets or creates the user's token and returns it together with a
    snapshot of the user's profile fields.
    """

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(
            data=request.data, context=dict(request=request))
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        token, _created = Token.objects.get_or_create(user=user)
        payload = dict(
            token=token.key,
            user_id=user.pk,
            phone=user.phone,
            email=user.email,
            type=user.user_type,
            photo=user.photo.url if user.photo else None,
            address=user.address,
            city=user.lives_in_id,
            birth_date=user.birth_date,
            username=user.username,
        )
        return Response(payload)
class UserViewSet(ModelViewSet):
    """CRUD endpoint for active users, with an open registration action."""
    serializer_class = UserSerializer
    queryset = User.objects.filter(is_active=True)

    def get_permissions(self):
        # Registration must be reachable anonymously; everything else is
        # read-only unless the caller is authenticated.
        if self.action in ('create', 'register'):
            return [permissions.AllowAny()]
        return [permissions.IsAuthenticatedOrReadOnly()]

    # Bug fix: DRF instantiates each entry of ``permission_classes`` itself,
    # so the *class* must be listed here, not an AllowAny() instance.
    @action(methods=['post'], detail=False, url_path='register', permission_classes=[permissions.AllowAny])
    def register(self, request, *args, **kwargs):
        """Create a user and collapse the response body to a status flag."""
        response = super().create(request, *args, **kwargs)
        if response:
            response.data = dict(status=True, code=4)
        return response

    def create(self, request, *args, **kwargs):
        # Plain POSTs to the collection behave exactly like /register.
        return self.register(request, *args, **kwargs)
class OtpApi(APIView):
    """Resend (GET) or verify (PUT) an SMS confirmation code."""
    permission_classes = [permissions.AllowAny]

    def get(self, request):
        """Resend the confirmation SMS for the identifier in the query string."""
        serializer = SmsConfirmationSerializer(data=request.GET)
        if serializer.resend():
            return Response(dict(status=True, code=5))
        return Response(dict(status=False, code=21))

    def put(self, request):
        """Validate the submitted code and activate the account."""
        serializer = SmsConfirmationSerializer(data=request.data)
        if serializer.activate():
            return Response(dict(status=True, code=5))
        return Response(dict(status=False, code=20))
class CuisineViewSet(ModelViewSet):
    """Cuisines: readable by anyone, editable by admins only."""
    queryset = Cuisine.objects.all()
    serializer_class = CuisineSerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
class MealTypeViewSet(ModelViewSet):
    """Meal types, optionally expanded with the menus a restaurant offers."""
    permission_classes = [my_perms.IsAdminOrReadOnly]
    serializer_class = MealTypeSerializer
    queryset = MealType.objects.all()

    def get_serializer(self, *args, **kwargs):
        # The with-menus action needs the richer nested serializer.
        if self.action == "get_types_with_menus":
            kwargs['context'] = self.get_serializer_context()
            return MealTypesWithMenuSerializer(*args, **kwargs)
        return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)

    @action(['get'], detail=False, url_path="type-with-menus", )
    def get_types_with_menus(self, request, *args, **kwargs):
        """List meal types that have menus at the given ?restaurant=<id>."""
        restaurant_id = request.query_params.get('restaurant', 0)
        matching = self.get_queryset().filter(menus__offered_by=restaurant_id)
        return Response(self.get_serializer(matching, many=True).data)
class RestaurantTypeViewSet(ModelViewSet):
    """Restaurant types: readable by anyone, editable by admins only."""
    queryset = RestaurantType.objects.all()
    serializer_class = RestaurantTypeSerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
class RestaurantViewSet(ModelViewSet):
    """Restaurant listing with home / recommended / special-offer feeds."""
    serializer_class = RestaurantSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Restaurant.objects.all()

    # NOTE: the former ``-> queryset`` return annotations on the two helpers
    # below evaluated the class attribute (a QuerySet *instance*, not a
    # type) at class-creation time; they were misleading and are removed.

    def _get_recommended_restaurants(self):
        """All restaurants annotated with their average star rating."""
        return self.get_queryset().all().annotate(rates_avg=Avg('rates__stars'))

    def _get_special_restaurants(self):
        """Restaurants with at least one discounted menu or a special day."""
        return self.get_queryset().filter(
            Q(menus__discount__gt=0) | Q(on_special_day=True))

    @action(['get'], detail=False, url_path="get-home")
    def home(self, request, *args, **kwargs):
        """Random 5-item samples of recommended / special / all restaurants."""
        recommended = self._get_recommended_restaurants().order_by('?')[:5]
        special = self._get_special_restaurants().order_by('?')[:5]
        all_restaurants = self.get_queryset().order_by('?')[:5]
        response = {
            'recommended': self.get_serializer(recommended, many=True).data,
            'special': self.get_serializer(special, many=True).data,
            'all': self.get_serializer(all_restaurants, many=True).data,
        }
        return Response(response)

    @action(['get'], detail=False, url_path="special-offers")
    def special_offers(self, request, *args, **kwargs):
        """Special-offer restaurants, newest first."""
        serializer = self.get_serializer(
            self._get_special_restaurants().order_by('-created_at'), many=True)
        return Response(serializer.data)

    @action(['get'], detail=False, url_path="recommended-offers")
    def recommended_offers(self, request, *args, **kwargs):
        """Restaurants ordered by average rating, best first."""
        serializer = self.get_serializer(
            self._get_recommended_restaurants().order_by('-rates_avg'), many=True)
        return Response(serializer.data)

    @action(['get'], detail=True, url_path="restaurant-menus")
    def get_restaurant_menus(self, request, *args, **kwargs):
        """Menus of one restaurant, grouped by meal type."""
        categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(
            restaurant_id=self.kwargs.get('pk'))
        return Response(categorized_menus)
class MenuViewSet(ModelViewSet):
    """Menu listing with home / special-offer / recommended feeds."""
    serializer_class = MenuSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Menu.objects.all()

    @action(['get'], detail=False, url_path="get-home")
    def home(self, request, *args, **kwargs):
        """Random 5-item samples of discounted and recommended menus."""
        menus = self.get_queryset()
        discounted = menus.filter(~Q(discount=0)).order_by('?')[:5]
        picks = menus.all().order_by('?')[:5]
        payload = {
            'recommended': self.get_serializer(picks, many=True).data,
            'special_offers': self.get_serializer(discounted, many=True).data,
        }
        return Response(data=payload)

    @action(['get'], detail=False, url_path="special-offers")
    def special_offers(self, request, *args, **kwargs):
        """Discounted menus, newest first."""
        discounted = self.get_queryset().filter(~Q(discount=0)).order_by('-created_at')
        return Response(self.get_serializer(discounted, many=True).data)

    @action(['get'], detail=False, url_path="recommended-offers")
    def recommended_offers(self, request, *args, **kwargs):
        """All menus, newest first."""
        newest = self.get_queryset().all().order_by('-created_at')
        return Response(self.get_serializer(newest, many=True).data)
class OrderViewSet(ModelViewSet):
    """The authenticated client's orders; creation uses the flat serializer."""
    serializer_class = OrderWRestaurantSerializer
    permission_classes = [permissions.IsAuthenticated]
    queryset = Order.objects.all().order_by('-created_at')

    def get_serializer(self, *args, **kwargs):
        if self.action == "create":
            # Bug fix: DRF's default get_serializer injects the serializer
            # context (request, view, format); the previous override dropped
            # it when swapping in OrderSerializer.
            kwargs.setdefault('context', self.get_serializer_context())
            return OrderSerializer(*args, **kwargs)
        return super(OrderViewSet, self).get_serializer(*args, **kwargs)

    def get_queryset(self):
        # Clients only ever see their own orders.
        return super(OrderViewSet, self).get_queryset().filter(client=self.request.user.client)

    def create(self, request, *args, **kwargs):
        # RequestDataFixer normalizes the incoming payload before the
        # standard create flow runs.
        fixer = RequestDataFixer(request=request)
        return super(OrderViewSet, self).create(fixer, *args, **kwargs)
class OrderLineViewSet(ModelViewSet):
    """Order lines: read-only unless the caller is authenticated."""
    queryset = OrderLine.objects.all()
    serializer_class = OrderLineSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
class WilayaViewSet(ModelViewSet):
    """Wilayas (provinces): readable by anyone, editable by admins only."""
    queryset = Wilaya.objects.all()
    serializer_class = WilayaSerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
class CityViewSet(ModelViewSet):
    """Cities: readable by anyone, editable by admins only."""
    queryset = City.objects.all()
    serializer_class = CitySerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
def version(request):
    """Get or set the published app version code.

    ``?code=<value>`` updates every AppVersion row and returns
    ``{'updated': True}``; without it, the current code is returned.
    """
    # Removed leftover debug print('inside this').
    code = request.GET.get('code', None)
    if code:
        AppVersion.objects.all().update(code=code)
        return JsonResponse({'updated': True})
    # Bug fix: .first() returns None on an empty table; guard instead of
    # raising AttributeError on ``None.code``.
    current = AppVersion.objects.all().first()
    return JsonResponse({'code': current.code if current else None})
class AddressViewSet(ModelViewSet):
    """Addresses of the authenticated client, with default-selection actions."""
    serializer_class = AddressSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Address.objects.all()

    @action(['PUT'], detail=True, url_path="set-default", url_name='set-default')
    def set_default(self, request, *args, **kwargs):
        """Mark this address as the default and clear the flag on the rest."""
        instance = self.get_object()
        instance.default = True
        instance.save()
        others = self.get_queryset().filter(~Q(pk=instance.pk),
                                            belongs_to=request.user.client)
        others.update(default=False)
        return Response(self.get_serializer(instance).data)

    @action(['PUT'], detail=False, url_path="set-main", url_name='set-main')
    def set_main(self, request, *args, **kwargs):
        """Clear the default flag on every address of this client."""
        self.get_queryset().filter(belongs_to=request.user.client).update(default=False)
        return Response({"status": True})

    def get_queryset(self):
        # Restrict listing/lookup to the current client's own addresses.
        return super(AddressViewSet, self).get_queryset().filter(
            belongs_to=self.request.user.client)
class PhoneViewSet(ModelViewSet):
    """Phone numbers of the authenticated user, with default-selection actions."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    serializer_class = PhoneSerializer
    queryset = Phone.objects.all()

    @action(['PUT'], detail=False, url_path="set-main", url_name='set-main')
    def set_main(self, request, *args, **kwargs):
        """Clear the default flag on every phone of this user."""
        self.get_queryset().filter(user=request.user).update(default=False)
        return Response({"status": True})

    @action(['PUT'], detail=True, url_path="set-default", url_name='set-default')
    def set_default(self, request, *args, **kwargs):
        """Mark this phone as the default and clear the flag on the rest."""
        instance = self.get_object()
        instance.default = True
        instance.save()
        self.get_queryset().filter(~Q(pk=instance.pk), user=request.user).update(default=False)
        return Response(self.get_serializer(instance).data)

    def get_queryset(self):
        # Bug fix: this previously called ``self.get_queryset()`` -- infinite
        # recursion (RecursionError) on every request. Delegate to the parent
        # queryset instead, mirroring AddressViewSet.
        return super(PhoneViewSet, self).get_queryset().filter(user=self.request.user)
|
normal
|
{
"blob_id": "9e8b5cebd48b3b98e421c896d9835ada5ec4166e",
"index": 2740,
"step-1": "<mask token>\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return 
Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass 
OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = 
self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-2": "<mask token>\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'get_types_with_menus':\n serializer_class = MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path='type-with-menus')\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.\n query_params.get('restaurant', 0))\n types = self.get_serializer(types, many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n 
all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], 
detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return 
Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-3": "<mask token>\n\n\nclass CuisineViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'get_types_with_menus':\n serializer_class = MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path='type-with-menus')\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.\n query_params.get('restaurant', 0))\n types = self.get_serializer(types, many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = 
self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer 
= self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n 
self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-4": "<mask token>\n\n\nclass CuisineViewSet(ModelViewSet):\n serializer_class = CuisineSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Cuisine.objects.all()\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'get_types_with_menus':\n serializer_class = MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path='type-with-menus')\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.\n query_params.get('restaurant', 0))\n types = self.get_serializer(types, many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) ->queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=\n 0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = 
self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {'recommended': recommended, 'special': special, 'all':\n all_restaurants}\n return Response(response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().\n order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants(\n ).order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, url_path='restaurant-menus')\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(\n restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path='get-home')\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {'recommended': recommended, 'special_offers':\n special_offers}\n return Response(data=response)\n\n @action(['get'], detail=False, url_path='special-offers')\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = 
queryset.filter(~Q(discount=0)).order_by('-created_at'\n )\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path='recommended-offers')\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == 'create':\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, self).get_queryset().filter(client=self.\n request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\n<mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n 
instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.\n user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(\n default=False)\n return Response({'status': True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to\n =self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path='set-main', url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({'status': True})\n\n @action(['PUT'], detail=True, url_path='set-default', url_name=\n 'set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user\n ).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-5": "from django.db.models import Q, Avg\nfrom django.http import JsonResponse\nfrom rest_framework import permissions\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom base_backend import permissions as my_perms\nfrom base_backend.utils import RequestDataFixer\nfrom restaurants.models import User, Cuisine, MealType, AppVersion, RestaurantType, Restaurant, Menu, Order, OrderLine, \\\n Wilaya, City, Address, Phone\nfrom restaurants.serializers import UserSerializer, SmsConfirmationSerializer, CuisineSerializer, \\\n RestaurantTypeSerializer, RestaurantSerializer, MenuSerializer, OrderLineSerializer, WilayaSerializer, \\\n CitySerializer, OrderWRestaurantSerializer, MealTypesWithMenuSerializer, MealTypeSerializer, OrderSerializer, \\\n AddressSerializer, PhoneSerializer\n\n\nclass LoginApi(ObtainAuthToken):\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data,\n context=dict(request=request))\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n\n return Response(\n dict(\n token=token.key,\n user_id=user.pk,\n phone=user.phone,\n email=user.email,\n type=user.user_type,\n photo=user.photo.url if user.photo else None,\n address=user.address,\n city=user.lives_in_id,\n birth_date=user.birth_date,\n username=user.username,\n # is_participant=user.client.is_participant if user.client is not None else None,\n # participant_id=user.client.participant.participant_id if user.client else None,\n )\n )\n\n\nclass UserViewSet(ModelViewSet):\n serializer_class = UserSerializer\n queryset = User.objects.filter(is_active=True)\n\n def get_permissions(self):\n if self.action == 'create' or 
self.action == 'register':\n return [permissions.AllowAny()]\n else:\n return [permissions.IsAuthenticatedOrReadOnly()]\n\n @action(methods=['post'], detail=False, url_path='register', permission_classes=[permissions.AllowAny()])\n def register(self, request, *args, **kwargs):\n response = super().create(request, *args, **kwargs)\n if response:\n response.data = dict(status=True, code=4)\n return response\n\n def create(self, request, *args, **kwargs):\n return self.register(request, *args, **kwargs)\n\n\nclass OtpApi(APIView):\n permission_classes = [permissions.AllowAny]\n\n def get(self, request):\n serializer = SmsConfirmationSerializer(data=request.GET)\n result = serializer.resend()\n if result:\n response = dict(status=True, code=5)\n else:\n response = dict(status=False, code=21)\n return Response(response)\n\n def put(self, request):\n serializer = SmsConfirmationSerializer(data=request.data)\n result = serializer.activate()\n if result:\n response = dict(status=True, code=5)\n else:\n response = dict(status=False, code=20)\n return Response(response)\n\n\nclass CuisineViewSet(ModelViewSet):\n serializer_class = CuisineSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Cuisine.objects.all()\n\n\nclass MealTypeViewSet(ModelViewSet):\n permission_classes = [my_perms.IsAdminOrReadOnly]\n serializer_class = MealTypeSerializer\n queryset = MealType.objects.all()\n\n def get_serializer(self, *args, **kwargs):\n if self.action == \"get_types_with_menus\":\n serializer_class = MealTypesWithMenuSerializer\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)\n\n @action(['get'], detail=False, url_path=\"type-with-menus\", )\n def get_types_with_menus(self, request, *args, **kwargs):\n types = self.get_queryset().filter(menus__offered_by=request.query_params.get('restaurant', 0))\n types = self.get_serializer(types, 
many=True).data\n return Response(types)\n\n\nclass RestaurantTypeViewSet(ModelViewSet):\n serializer_class = RestaurantTypeSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = RestaurantType.objects.all()\n\n\nclass RestaurantViewSet(ModelViewSet):\n serializer_class = RestaurantSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Restaurant.objects.all()\n\n def _get_recommended_restaurants(self) -> queryset:\n queryset = self.get_queryset()\n recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))\n return recommended\n\n def _get_special_restaurants(self) -> queryset:\n queryset = self.get_queryset()\n special_offers_restaurants = queryset.filter(Q(menus__discount__gt=0) | Q(on_special_day=True))\n return special_offers_restaurants\n\n @action(['get'], detail=False, url_path=\"get-home\")\n def home(self, request, *args, **kwargs):\n recommended = self._get_recommended_restaurants().order_by('?')[:5]\n special = self._get_special_restaurants().order_by('?')[:5]\n all_restaurants = self.get_queryset().order_by('?')[:5]\n recommended = self.get_serializer(recommended, many=True).data\n special = self.get_serializer(special, many=True).data\n all_restaurants = self.get_serializer(all_restaurants, many=True).data\n response = {\n 'recommended': recommended,\n 'special': special,\n 'all': all_restaurants\n }\n return Response(response)\n\n @action(['get'], detail=False, url_path=\"special-offers\")\n def special_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_special_restaurants().order_by('-created_at'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path=\"recommended-offers\")\n def recommended_offers(self, request, *args, **kwargs):\n serializer = self.get_serializer(self._get_recommended_restaurants().order_by('-rates_avg'), many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=True, 
url_path=\"restaurant-menus\")\n def get_restaurant_menus(self, request, *args, **kwargs):\n categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(restaurant_id=self.kwargs.get('pk'))\n return Response(categorized_menus)\n\n\nclass MenuViewSet(ModelViewSet):\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Menu.objects.all()\n\n @action(['get'], detail=False, url_path=\"get-home\")\n def home(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]\n recommended = queryset.all().order_by('?')[:5]\n special_offers = self.get_serializer(special_offers, many=True).data\n recommended = self.get_serializer(recommended, many=True).data\n response = {\n 'recommended': recommended,\n 'special_offers': special_offers\n }\n return Response(data=response)\n\n @action(['get'], detail=False, url_path=\"special-offers\")\n def special_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at')\n serializer = self.get_serializer(special_offers, many=True)\n return Response(serializer.data)\n\n @action(['get'], detail=False, url_path=\"recommended-offers\")\n def recommended_offers(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n recommended = queryset.all().order_by('-created_at')\n serializer = self.get_serializer(recommended, many=True)\n return Response(serializer.data)\n\n\nclass OrderViewSet(ModelViewSet):\n serializer_class = OrderWRestaurantSerializer\n permission_classes = [permissions.IsAuthenticated]\n queryset = Order.objects.all().order_by('-created_at')\n\n def get_serializer(self, *args, **kwargs):\n if self.action == \"create\":\n return OrderSerializer(*args, **kwargs)\n return super(OrderViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n return super(OrderViewSet, 
self).get_queryset().filter(client=self.request.user.client)\n\n def create(self, request, *args, **kwargs):\n fixer = RequestDataFixer(request=request)\n return super(OrderViewSet, self).create(fixer, *args, **kwargs)\n\n\nclass OrderLineViewSet(ModelViewSet):\n serializer_class = OrderLineSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = OrderLine.objects.all()\n\n\nclass WilayaViewSet(ModelViewSet):\n serializer_class = WilayaSerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = Wilaya.objects.all()\n\n\nclass CityViewSet(ModelViewSet):\n serializer_class = CitySerializer\n permission_classes = [my_perms.IsAdminOrReadOnly]\n queryset = City.objects.all()\n\n\ndef version(request):\n print('inside this')\n if request.GET.get('code', None):\n code = request.GET.get('code')\n AppVersion.objects.all().update(code=code)\n return JsonResponse({'updated': True})\n else:\n code = AppVersion.objects.all().first().code\n return JsonResponse({'code': code})\n\n\nclass AddressViewSet(ModelViewSet):\n serializer_class = AddressSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n queryset = Address.objects.all()\n\n @action(['PUT'], detail=True, url_path=\"set-default\", url_name='set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.user.client).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n @action(['PUT'], detail=False, url_path=\"set-main\", url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(belongs_to=request.user.client).update(default=False)\n return Response({\"status\": True})\n\n def get_queryset(self):\n return super(AddressViewSet, self).get_queryset().filter(belongs_to=self.request.user.client)\n\n\nclass PhoneViewSet(ModelViewSet):\n permission_classes 
= [permissions.IsAuthenticatedOrReadOnly]\n serializer_class = PhoneSerializer\n queryset = Phone.objects.all()\n\n @action(['PUT'], detail=False, url_path=\"set-main\", url_name='set-main')\n def set_main(self, request, *args, **kwargs):\n self.get_queryset().filter(user=request.user).update(default=False)\n return Response({\"status\": True})\n\n @action(['PUT'], detail=True, url_path=\"set-default\", url_name='set-default')\n def set_default(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.default = True\n instance.save()\n self.get_queryset().filter(~Q(pk=instance.pk), user=request.user).update(default=False)\n return Response(self.get_serializer(instance).data)\n\n def get_queryset(self):\n return self.get_queryset().filter(user=self.request.user)\n",
"step-ids": [
34,
40,
41,
42,
56
]
}
|
[
34,
40,
41,
42,
56
] |
# Python 2 GUI skeleton for a number-guessing game
# (the Tkinter module was renamed tkinter in Python 3).
import Tkinter
import random
# The secret number the player is supposed to guess (1..100 inclusive).
secret = random.randint(1, 100)
### TKINTER ELEMENTS ###
window = Tkinter.Tk()
# greeting text
greeting = Tkinter.Label(window, text="Guess the secret number!")
greeting.pack()
# guess entry field
guess = Tkinter.Entry(window)
guess.pack()
# submit button
submit = Tkinter.Button(window, text="Submit") # add a button, but this button is doing nothing
submit.pack()
# NOTE(review): no command= callback is attached to the button, so entered
# guesses are never compared against `secret` -- presumably a later exercise
# wires that up.  mainloop() blocks until the window is closed.
window.mainloop()
|
normal
|
{
"blob_id": "59eb705d6d388de9afbcc0df3003f4d4f45f1fbd",
"index": 3989,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngreeting.pack()\n<mask token>\nguess.pack()\n<mask token>\nsubmit.pack()\nwindow.mainloop()\n",
"step-3": "<mask token>\nsecret = random.randint(1, 100)\nwindow = Tkinter.Tk()\ngreeting = Tkinter.Label(window, text='Guess the secret number!')\ngreeting.pack()\nguess = Tkinter.Entry(window)\nguess.pack()\nsubmit = Tkinter.Button(window, text='Submit')\nsubmit.pack()\nwindow.mainloop()\n",
"step-4": "import Tkinter\nimport random\nsecret = random.randint(1, 100)\nwindow = Tkinter.Tk()\ngreeting = Tkinter.Label(window, text='Guess the secret number!')\ngreeting.pack()\nguess = Tkinter.Entry(window)\nguess.pack()\nsubmit = Tkinter.Button(window, text='Submit')\nsubmit.pack()\nwindow.mainloop()\n",
"step-5": "import Tkinter\nimport random\n\nsecret = random.randint(1, 100)\n\n### TKINTER ELEMENTS ###\n\nwindow = Tkinter.Tk()\n\n# greeting text\ngreeting = Tkinter.Label(window, text=\"Guess the secret number!\")\ngreeting.pack()\n\n# guess entry field\nguess = Tkinter.Entry(window)\nguess.pack()\n\n# submit button\nsubmit = Tkinter.Button(window, text=\"Submit\") # add a button, but this button is doing nothing\nsubmit.pack()\n\nwindow.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Write by Jess.S 25/1/2019
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = ['FangSong'] # default font so the Chinese chart labels render
plt.rcParams['axes.unicode_minus'] = False # keep '-' from rendering as a box when figures are saved
def draw_point(x, y):
    """Show a scatter plot of the raw node coordinates."""
    plt.scatter(x, y)
    plt.grid(True)  # grid lines on
    plt.xlabel('x轴')  # x-axis caption
    plt.ylabel('y轴')  # y-axis caption
    plt.title('点分布图')  # chart title
    plt.show()
def draw_route(route_list, x, y):
    """Plot every route as a polyline over the scatter of all nodes."""
    plt.scatter(x, y)
    for single_route in route_list:
        pts = np.array(single_route)
        plt.plot(pts[:, 0], pts[:, 1])
    plt.grid(True)  # grid lines on
    plt.xlabel('x轴')  # x-axis caption
    plt.ylabel('y轴')  # y-axis caption
    plt.title('路径图')  # chart title
    plt.show()
def read_data(path, node):
    """Load node coordinates from the CSV at *path*.

    Every (Easting, Southing) pair is appended to *node*, which is mutated
    in place.  Returns a tuple of (pairs sorted by x then by y, the Easting
    column, the Southing column).
    """
    frame = pd.read_csv(path)
    eastings = frame['Easting']
    southings = frame['Southing']
    for east, south in zip(eastings, southings):
        node.append([east, south])
    # Lexicographic sort: primary key x, secondary key y.
    ordered = sorted(node, key=lambda pair: (pair[0], pair[1]))
    return ordered, eastings, southings
#判断前沿面的点是否被更新
# def dominant(prev,current):
# if prev[0]<current[0] & prev[1]<current[1]:
# return True
# return False
#
# #判断两条路径是否有重叠部分
# def judge_line(origin,n1,n2):
# if((n1[1]-origin[1])/(n1[0]-origin[0])==(n2[1]-origin[1])/(n2[0]-origin[0])):
# return True
# return False
def init_routing(route_number, route_list, leading_edge, node_sort):
    """Seed up to *route_number* routes, all starting at node_sort[0].

    Each route is [origin, n] for one other node n, and n is recorded in
    *leading_edge* as that route's current growth front.  Both lists are
    mutated in place.

    Returns route_list.  Bug fix: the original fell through to a bare
    ``return`` (None) when there were fewer candidate nodes than
    route_number, which crashed the caller; now the (possibly short)
    route_list is always returned.
    """
    origin = node_sort[0]
    for n in node_sort:
        if n == origin:
            continue  # never route the origin to itself (covers duplicates)
        route_list.append([origin, n])
        leading_edge.append(n)
        if len(route_list) >= route_number:
            break
    return route_list
def expand(route_list,leading_edge,node_sort,route_number):
    """Greedily append each remaining sorted node to one existing route.

    For every node not consumed by init_routing, pick the route whose
    leading-edge y is closest above (>=) the node's y; if no leading edge
    lies at or above it, fall back to the route whose leading edge is
    highest.  Mutates route_list and leading_edge in place and returns
    route_list.
    """
    for i in range(len(node_sort)):
        # Indices 0..route_number were already used to seed the routes.
        if(i<=route_number):
            continue
        # NOTE(review): despite its name, y_min first holds the MAXIMUM
        # leading-edge y; that value then doubles as the initial threshold
        # for the nearest-gap search below.
        y_min = 0
        max_index = 0
        for a in range(len(leading_edge)):
            if(leading_edge[a][1]>y_min):
                y_min = leading_edge[a][1]
                max_index = a
        index = -1
        # Find the leading edge with the smallest non-negative vertical gap
        # to this node (bitwise & works here because both sides are bools).
        for n in range(len(leading_edge)):
            delta_y = leading_edge[n][1] - node_sort[i][1]
            if((delta_y>=0) & (delta_y<y_min)):
                y_min = delta_y
                index = n
        if(index < 0):
            # No edge at or above this node: extend the highest route instead.
            index = max_index
        route_list[index].append(node_sort[i])
        leading_edge[index] = node_sort[i]
    return route_list
if __name__=='__main__':
    path = 'coordinates v1.csv'
    node = []  # coordinates of every node; sorted inside read_data
    route_list = []  # the routes built so far
    leading_edge = []  # the last node appended to each route (its growth front)
    route_number = 6  # how many routes to fan out from the first node
    node_sort,x,y = read_data(path, node)
    route_list = init_routing(route_number, route_list, leading_edge,node_sort)
    route_list = expand(route_list, leading_edge, node_sort, route_number)
    route_list = np.array(route_list)
    draw_route(route_list,x,y)
    print(route_list)
|
normal
|
{
"blob_id": "1c60620814a4aea2573caf99cee87590a8d57c18",
"index": 5483,
"step-1": "<mask token>\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = expand(route_list, leading_edge, node_sort, route_number)\n route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-3": "<mask token>\nplt.rcParams['font.sans-serif'] = ['FangSong']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = expand(route_list, leading_edge, node_sort, route_number)\n 
route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.sans-serif'] = ['FangSong']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = 
expand(route_list, leading_edge, node_sort, route_number)\n route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-5": "#Write by Jess.S 25/1/2019\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nplt.rcParams['font.sans-serif'] = ['FangSong'] # 指定默认字体\r\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\r\n\r\ndef draw_point(x,y):\r\n plt.scatter(x, y)\r\n plt.title('点分布图')#显示图表标题\r\n plt.xlabel('x轴')#x轴名称\r\n plt.ylabel('y轴')#y轴名称\r\n plt.grid(True)#显示网格线\r\n plt.show()\r\n\r\ndef draw_route(route_list,x,y):\r\n plt.scatter(x, y)\r\n for route in route_list:\r\n route= np.array(route)\r\n# print(route.shape)\r\n plt.plot(route[:,0],route[:,1])\r\n plt.title('路径图')#显示图表标题\r\n plt.xlabel('x轴')#x轴名称\r\n plt.ylabel('y轴')#y轴名称\r\n plt.grid(True)#显示网格线\r\n plt.show()\r\n \r\ndef read_data(path,node):\r\n csv_data = pd.read_csv(path) # 读取训练数据\r\n # print(csv_data)\r\n x = csv_data['Easting']\r\n y = csv_data['Southing']\r\n\r\n # print(x)\r\n # print(y)\r\n for i in range(len(x)):\r\n xy = []\r\n xy.append(x[i])\r\n xy.append(y[i])\r\n node.append(xy)\r\n # print(node)\r\n node_sort =sorted(node, key=lambda x: (x[0], x[1]))\r\n # print(node_sort)\r\n #另一种利用numpy的排序方法\r\n \r\n # node = np.array(node)\r\n # node = node[np.lexsort(node[:,::-1].T)]\r\n # print(node)\r\n return node_sort,x,y\r\n#判断前沿面的点是否被更新\r\n# def dominant(prev,current):\r\n# if prev[0]<current[0] & prev[1]<current[1]:\r\n# return True\r\n# return False\r\n# \r\n# #判断两条路径是否有重叠部分\r\n# def judge_line(origin,n1,n2):\r\n# if((n1[1]-origin[1])/(n1[0]-origin[0])==(n2[1]-origin[1])/(n2[0]-origin[0])):\r\n# return True\r\n# return False\r\n\r\ndef init_routing(route_number,route_list,leading_edge,node_sort): \r\n for n in node_sort:\r\n if(n == node_sort[0]):\r\n continue\r\n route = []\r\n route.append(node_sort[0])\r\n route.append(n)\r\n route_list.append(route)\r\n leading_edge.append(n)\r\n if(len(route_list)>=route_number):\r\n return route_list\r\n return\r\n \r\ndef expand(route_list,leading_edge,node_sort,route_number):\r\n for i in range(len(node_sort)):\r\n 
if(i<=route_number):\r\n continue\r\n y_min = 0\r\n max_index = 0\r\n for a in range(len(leading_edge)):\r\n if(leading_edge[a][1]>y_min):\r\n y_min = leading_edge[a][1]\r\n max_index = a\r\n index = -1\r\n for n in range(len(leading_edge)):\r\n delta_y = leading_edge[n][1] - node_sort[i][1]\r\n if((delta_y>=0) & (delta_y<y_min)):\r\n y_min = delta_y\r\n index = n\r\n if(index < 0):\r\n index = max_index \r\n route_list[index].append(node_sort[i])\r\n leading_edge[index] = node_sort[i]\r\n return route_list \r\n\r\nif __name__=='__main__':\r\n path = 'coordinates v1.csv'\r\n node = []#所有点的坐标信息,下面进行排序\r\n route_list = []#存储现有的路径信息\r\n leading_edge = []#存储路径最前沿延续的路径index\r\n route_number = 6\r\n node_sort,x,y = read_data(path, node)\r\n route_list = init_routing(route_number, route_list, leading_edge,node_sort)\r\n route_list = expand(route_list, leading_edge, node_sort, route_number)\r\n route_list = np.array(route_list)\r\n draw_route(route_list,x,y)\r\n print(route_list)\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from pyecharts import options as opts
from pyecharts.charts import *
import pandas as pd
import namemap
from pyecharts.globals import ThemeType
#
import time
import json
import requests
from datetime import datetime
import pandas as pd
import numpy as np
def read_country_code():
    """
    Return the inverse of namemap.nameMap: each value becomes a key and
    vice versa, so lookups can run in the opposite direction.
    :return: dict mapping every nameMap value to its original key
    """
    return {val: key for key, val in namemap.nameMap.items()}
def read_csv():
    """
    Read 2019-nCoV.csv and return two parallel lists: country names
    (translated via read_country_code) and their cumulative confirmed counts.
    Rows whose name is missing from the dictionary are printed and skipped.
    :return: (names, confirmed counts)
    """
    mapping = read_country_code()
    table = pd.read_csv("2019-nCoV.csv", index_col=False)
    names, counts = [], []
    for raw_name, count in zip(table['name'], table['confirm']):
        if raw_name in mapping:
            names.append(mapping[raw_name])
            counts.append(count)
        else:
            print(raw_name)
    return names, counts
def catch_data():
    """Fetch the Tencent epidemic feed and return its decoded 'data' payload."""
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
    payload = requests.get(url=url).json()
    return json.loads(payload['data'])
# Data-extraction helpers: each pulls one statistic out of a record coming
# back from the feed.  A record is normally a dict, but the original code
# also tolerated the string repr of a dict, so that path is preserved --
# now via ast.literal_eval instead of eval, so a malicious payload cannot
# execute arbitrary code.
def _extract_stat(x, key):
    """Return record[key], where record is *x* itself (already a dict) or
    str(x) parsed as a Python literal."""
    import ast  # local import keeps this block self-contained
    record = x if isinstance(x, dict) else ast.literal_eval(str(x))
    return record[key]


def confirm(x):
    """Cumulative confirmed count of record *x*."""
    return _extract_stat(x, 'confirm')


def suspect(x):
    """Suspected-case count of record *x*."""
    return _extract_stat(x, 'suspect')


def dead(x):
    """Death count of record *x*."""
    return _extract_stat(x, 'dead')


def heal(x):
    """Recovered count of record *x*."""
    return _extract_stat(x, 'heal')
def draw_map():
    """
    Build the China dashboard (total/new-cases pies + province map) and a
    world map, then render everything into 'covid-19 中国和世界数据.html'.
    """
    data = catch_data()
    dict_keys = data.keys()  # NOTE(review): unused; kept for compatibility
    # China-wide headline figures.
    lastUpdateTime = data['lastUpdateTime']
    chinaTotal = data['chinaTotal']
    chinaAdd = data['chinaAdd']
    # Sample shape: {'confirm': 84970, 'heal': 79963, 'dead': 4645, 'nowConfirm': 362,
    # 'suspect': 11, 'nowSevere': 13, 'importedCase': 1868, 'noInfect': 108}
    areaTree = data['areaTree']
    china_data = areaTree[0]['children']
    china_list = []
    # Flatten the province -> city tree into one dict per city.
    for a in range(len(china_data)):
        province = china_data[a]['name']
        province_list = china_data[a]['children']
        for b in range(len(province_list)):
            city = province_list[b]['name']
            total = province_list[b]['total']
            today = province_list[b]['today']
            china_dict = {}
            china_dict['province'] = province
            china_dict['city'] = city
            china_dict['total'] = total
            china_dict['today'] = today
            china_list.append(china_dict)
    china_data = pd.DataFrame(china_list)
    china_data.head()
    # Map the nested total/today dicts onto flat numeric columns.
    china_data['confirm'] = china_data['total'].map(confirm)
    china_data['suspect'] = china_data['total'].map(suspect)
    china_data['dead'] = china_data['total'].map(dead)
    china_data['heal'] = china_data['total'].map(heal)
    china_data['addconfirm'] = china_data['today'].map(confirm)
    #['addsuspect'] = china_data['today'].map(suspect)
    #china_data['adddead'] = china_data['today'].map(dead)
    #china_data['addheal'] = china_data['today'].map(heal)
    china_data = china_data[["province","city","confirm","suspect","dead","heal","addconfirm"]]
    china_data.head()
    total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) # theme and canvas size
    total_pie.add("",[list(z) for z in zip(chinaTotal.keys(), chinaTotal.values())],
            center=["50%", "70%"], # pie position on the canvas
            radius=[50, 80]) # inner/outer radius
    total_pie.set_global_opts(
        title_opts=opts.TitleOpts(title="全国总量",subtitle=("截止"+lastUpdateTime)))
    total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter="{c}")) # label format
    total_pie.render_notebook()
    totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) # theme and canvas size
    totaladd_pie.add("",[list(z) for z in zip(chinaAdd.keys(), chinaAdd.values())],
            center=["50%", "50%"],
            radius=[50, 80])
    totaladd_pie.set_global_opts(
        title_opts=opts.TitleOpts(title="昨日新增"))
    totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter="{c}")) # label format
    totaladd_pie.render_notebook()
    # Aggregate confirmed counts per province for the choropleth map.
    area_data = china_data.groupby("province")["confirm"].sum().reset_index()
    area_data.columns = ["province","confirm"]
    area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))
    area_map.add("",[list(z) for z in zip(list(area_data["province"]), list(area_data["confirm"]))], "china",is_map_symbol_show=False)
    area_map.set_global_opts(title_opts=opts.TitleOpts(title="2019_nCoV中国疫情地图"),visualmap_opts=opts.VisualMapOpts(is_piecewise=True,
                      pieces = [
                        {"min": 1001 , "label": '>1000',"color": "#893448"}, # no max: open-ended upper bound
                        {"min": 500, "max": 1000, "label": '500-1000',"color": "#ff585e"},
                        {"min": 101, "max": 499, "label": '101-499',"color": "#fb8146"},
                        {"min": 10, "max": 100, "label": '10-100',"color": "#ffb248"},
                        {"min": 0, "max": 9, "label": '0-9',"color" : "#fff2d1" }]))
    area_map.render_notebook()
    page = Page()
    page.add(total_pie)
    page.add(totaladd_pie)
    page.add(area_map)
    # (Translation of the original author's note below: "Draw the world map.
    # Oddly, the map only renders when the two lists are hard-coded; data
    # obtained from a method did not render.")
    """
    绘制世界地图
    遇到一个很神奇的问题:
    两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据
    :return:
    """
    # Fix for the note above: confirmed_count holds numpy ints, which must be
    # converted to plain Python ints before pyecharts will render them.
    # (Reported by reader @李康伟 of the author's public account.)
    countrys_names, confirmed_count = read_csv()
    confirmed_count_list = []
    for item in confirmed_count:
        confirmed_count_list.append(int(item))
    # (Hard-coded fallback lists of country names and counts removed here;
    # see version-control history if the offline sample data is needed.)
    c = (
        Map()
        .add(
            "确诊人数",
            [list(z) for z in zip(countrys_names, confirmed_count_list)],
            is_map_symbol_show=False,
            maptype="world",
            label_opts=opts.LabelOpts(is_show=False),
            itemstyle_opts=opts.ItemStyleOpts(color="rgb(49,60,72)")
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            title_opts=opts.TitleOpts(title="全球 2019-nCoV 地图"),
            visualmap_opts=opts.VisualMapOpts(max_=1700000),
        )
        #.render("map_world.html")
    )
    page.add(c)
    page.render('covid-19 中国和世界数据.html')
|
normal
|
{
"blob_id": "fe3584dd858c06d66215b4a182adf87d35324975",
"index": 4486,
"step-1": "<mask token>\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\n<mask token>\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\ndef suspect(x):\n suspect = eval(str(x))['suspect']\n return suspect\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\ndef suspect(x):\n suspect = eval(str(x))['suspect']\n return suspect\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\ndef draw_map():\n \"\"\"\n china!\n \"\"\"\n data = catch_data()\n dict_keys = data.keys()\n lastUpdateTime = data['lastUpdateTime']\n chinaTotal = data['chinaTotal']\n chinaAdd = data['chinaAdd']\n areaTree = data['areaTree']\n china_data = areaTree[0]['children']\n china_list = []\n for a in range(len(china_data)):\n province = china_data[a]['name']\n province_list = china_data[a]['children']\n for b in range(len(province_list)):\n city = province_list[b]['name']\n total = province_list[b]['total']\n today = province_list[b]['today']\n china_dict = {}\n china_dict['province'] = province\n china_dict['city'] = city\n china_dict['total'] = total\n china_dict['today'] = today\n china_list.append(china_dict)\n china_data = pd.DataFrame(china_list)\n china_data.head()\n china_data['confirm'] = china_data['total'].map(confirm)\n 
china_data['suspect'] = china_data['total'].map(suspect)\n china_data['dead'] = china_data['total'].map(dead)\n china_data['heal'] = china_data['total'].map(heal)\n china_data['addconfirm'] = china_data['today'].map(confirm)\n china_data = china_data[['province', 'city', 'confirm', 'suspect',\n 'dead', 'heal', 'addconfirm']]\n china_data.head()\n total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS, width\n ='900px', height='350px'))\n total_pie.add('', [list(z) for z in zip(chinaTotal.keys(), chinaTotal.\n values())], center=['50%', '70%'], radius=[50, 80])\n total_pie.set_global_opts(title_opts=opts.TitleOpts(title='全国总量',\n subtitle='截止' + lastUpdateTime))\n total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n total_pie.render_notebook()\n totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,\n width='900px', height='350px'))\n totaladd_pie.add('', [list(z) for z in zip(chinaAdd.keys(), chinaAdd.\n values())], center=['50%', '50%'], radius=[50, 80])\n totaladd_pie.set_global_opts(title_opts=opts.TitleOpts(title='昨日新增'))\n totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n totaladd_pie.render_notebook()\n area_data = china_data.groupby('province')['confirm'].sum().reset_index()\n area_data.columns = ['province', 'confirm']\n area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))\n area_map.add('', [list(z) for z in zip(list(area_data['province']),\n list(area_data['confirm']))], 'china', is_map_symbol_show=False)\n area_map.set_global_opts(title_opts=opts.TitleOpts(title=\n '2019_nCoV中国疫情地图'), visualmap_opts=opts.VisualMapOpts(is_piecewise=\n True, pieces=[{'min': 1001, 'label': '>1000', 'color': '#893448'},\n {'min': 500, 'max': 1000, 'label': '500-1000', 'color': '#ff585e'},\n {'min': 101, 'max': 499, 'label': '101-499', 'color': '#fb8146'}, {\n 'min': 10, 'max': 100, 'label': '10-100', 'color': '#ffb248'}, {\n 'min': 0, 'max': 9, 'label': '0-9', 'color': '#fff2d1'}]))\n 
area_map.render_notebook()\n page = Page()\n page.add(total_pie)\n page.add(totaladd_pie)\n page.add(area_map)\n \"\"\"\n 绘制世界地图\n 遇到一个很神奇的问题:\n 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据\n :return:\n \"\"\"\n countrys_names, confirmed_count = read_csv()\n confirmed_count_list = []\n for item in confirmed_count:\n confirmed_count_list.append(int(item))\n c = Map().add('确诊人数', [list(z) for z in zip(countrys_names,\n confirmed_count_list)], is_map_symbol_show=False, maptype='world',\n label_opts=opts.LabelOpts(is_show=False), itemstyle_opts=opts.\n ItemStyleOpts(color='rgb(49,60,72)')).set_series_opts(label_opts=\n opts.LabelOpts(is_show=False)).set_global_opts(title_opts=opts.\n TitleOpts(title='全球 2019-nCoV 地图'), visualmap_opts=opts.\n VisualMapOpts(max_=1700000))\n page.add(c)\n page.render('covid-19 中国和世界数据.html')\n",
"step-4": "from pyecharts import options as opts\nfrom pyecharts.charts import *\nimport pandas as pd\nimport namemap\nfrom pyecharts.globals import ThemeType\nimport time\nimport json\nimport requests\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\ndef suspect(x):\n suspect = eval(str(x))['suspect']\n return suspect\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\ndef draw_map():\n \"\"\"\n china!\n \"\"\"\n data = catch_data()\n dict_keys = data.keys()\n lastUpdateTime = data['lastUpdateTime']\n chinaTotal = data['chinaTotal']\n chinaAdd = data['chinaAdd']\n areaTree = data['areaTree']\n china_data = areaTree[0]['children']\n china_list = []\n for a in range(len(china_data)):\n province = china_data[a]['name']\n province_list = china_data[a]['children']\n for b in range(len(province_list)):\n city = province_list[b]['name']\n total = province_list[b]['total']\n today = province_list[b]['today']\n china_dict = {}\n china_dict['province'] = 
province\n china_dict['city'] = city\n china_dict['total'] = total\n china_dict['today'] = today\n china_list.append(china_dict)\n china_data = pd.DataFrame(china_list)\n china_data.head()\n china_data['confirm'] = china_data['total'].map(confirm)\n china_data['suspect'] = china_data['total'].map(suspect)\n china_data['dead'] = china_data['total'].map(dead)\n china_data['heal'] = china_data['total'].map(heal)\n china_data['addconfirm'] = china_data['today'].map(confirm)\n china_data = china_data[['province', 'city', 'confirm', 'suspect',\n 'dead', 'heal', 'addconfirm']]\n china_data.head()\n total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS, width\n ='900px', height='350px'))\n total_pie.add('', [list(z) for z in zip(chinaTotal.keys(), chinaTotal.\n values())], center=['50%', '70%'], radius=[50, 80])\n total_pie.set_global_opts(title_opts=opts.TitleOpts(title='全国总量',\n subtitle='截止' + lastUpdateTime))\n total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n total_pie.render_notebook()\n totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,\n width='900px', height='350px'))\n totaladd_pie.add('', [list(z) for z in zip(chinaAdd.keys(), chinaAdd.\n values())], center=['50%', '50%'], radius=[50, 80])\n totaladd_pie.set_global_opts(title_opts=opts.TitleOpts(title='昨日新增'))\n totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n totaladd_pie.render_notebook()\n area_data = china_data.groupby('province')['confirm'].sum().reset_index()\n area_data.columns = ['province', 'confirm']\n area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))\n area_map.add('', [list(z) for z in zip(list(area_data['province']),\n list(area_data['confirm']))], 'china', is_map_symbol_show=False)\n area_map.set_global_opts(title_opts=opts.TitleOpts(title=\n '2019_nCoV中国疫情地图'), visualmap_opts=opts.VisualMapOpts(is_piecewise=\n True, pieces=[{'min': 1001, 'label': '>1000', 'color': '#893448'},\n {'min': 500, 'max': 1000, 
'label': '500-1000', 'color': '#ff585e'},\n {'min': 101, 'max': 499, 'label': '101-499', 'color': '#fb8146'}, {\n 'min': 10, 'max': 100, 'label': '10-100', 'color': '#ffb248'}, {\n 'min': 0, 'max': 9, 'label': '0-9', 'color': '#fff2d1'}]))\n area_map.render_notebook()\n page = Page()\n page.add(total_pie)\n page.add(totaladd_pie)\n page.add(area_map)\n \"\"\"\n 绘制世界地图\n 遇到一个很神奇的问题:\n 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据\n :return:\n \"\"\"\n countrys_names, confirmed_count = read_csv()\n confirmed_count_list = []\n for item in confirmed_count:\n confirmed_count_list.append(int(item))\n c = Map().add('确诊人数', [list(z) for z in zip(countrys_names,\n confirmed_count_list)], is_map_symbol_show=False, maptype='world',\n label_opts=opts.LabelOpts(is_show=False), itemstyle_opts=opts.\n ItemStyleOpts(color='rgb(49,60,72)')).set_series_opts(label_opts=\n opts.LabelOpts(is_show=False)).set_global_opts(title_opts=opts.\n TitleOpts(title='全球 2019-nCoV 地图'), visualmap_opts=opts.\n VisualMapOpts(max_=1700000))\n page.add(c)\n page.render('covid-19 中国和世界数据.html')\n",
"step-5": "from pyecharts import options as opts\r\nfrom pyecharts.charts import *\r\nimport pandas as pd\r\nimport namemap\r\nfrom pyecharts.globals import ThemeType\r\n\r\n\r\n#\r\nimport time \r\nimport json\r\nimport requests\r\nfrom datetime import datetime\r\nimport pandas as pd \r\nimport numpy as np\r\n \r\ndef read_country_code():\r\n \"\"\"\r\n 获取国家中英文字典\r\n :return:\r\n \"\"\"\r\n country_dict = {}\r\n for key, val in namemap.nameMap.items(): # 将 nameMap 列表里面键值互换\r\n country_dict[val] = key\r\n return country_dict\r\n\r\ndef read_csv():\r\n \"\"\"\r\n 读取数据,返回国家英文名称列表和累计确诊数列表\r\n :return:\r\n \"\"\"\r\n country_dict = read_country_code()\r\n data = pd.read_csv(\"2019-nCoV.csv\", index_col=False)\r\n\r\n countrys_names = list()\r\n confirmed_count = list()\r\n\r\n for x in range(len(data.index)):\r\n if data['name'].iloc[x] in country_dict.keys():\r\n countrys_names.append(country_dict[data['name'].iloc[x]])\r\n confirmed_count.append(data['confirm'].iloc[x])\r\n else:\r\n print(data['name'].iloc[x])\r\n\r\n return countrys_names, confirmed_count\r\n\r\n\r\ndef catch_data():\r\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\r\n reponse = requests.get(url=url).json()\r\n data = json.loads(reponse['data'])\r\n return data\r\n\r\n\r\n\r\n\r\n# 定义数据处理函数\r\ndef confirm(x):\r\n confirm = eval(str(x))['confirm']\r\n return confirm\r\ndef suspect(x):\r\n suspect = eval(str(x))['suspect']\r\n return suspect\r\ndef dead(x):\r\n dead = eval(str(x))['dead']\r\n return dead\r\ndef heal(x):\r\n heal = eval(str(x))['heal']\r\n return heal\r\n\r\ndef draw_map():\r\n \"\"\"\r\n china!\r\n \"\"\"\r\n data = catch_data()\r\n dict_keys = data.keys()\r\n # China\r\n lastUpdateTime = data['lastUpdateTime']\r\n chinaTotal = data['chinaTotal']\r\n chinaAdd = data['chinaAdd']\r\n #结果{'confirm': 84970, 'heal': 79963, 'dead': 4645, 'nowConfirm': 362, 'suspect': 11, \r\n #'nowSevere': 13, 'importedCase': 1868, 'noInfect': 108}\r\n areaTree = data['areaTree']\r\n 
china_data = areaTree[0]['children']\r\n china_list = []\r\n for a in range(len(china_data)):\r\n province = china_data[a]['name']\r\n province_list = china_data[a]['children']\r\n for b in range(len(province_list)):\r\n city = province_list[b]['name']\r\n total = province_list[b]['total']\r\n today = province_list[b]['today']\r\n china_dict = {}\r\n china_dict['province'] = province\r\n china_dict['city'] = city\r\n china_dict['total'] = total\r\n china_dict['today'] = today\r\n china_list.append(china_dict)\r\n china_data = pd.DataFrame(china_list)\r\n china_data.head()\r\n \r\n # 函数映射\r\n china_data['confirm'] = china_data['total'].map(confirm)\r\n china_data['suspect'] = china_data['total'].map(suspect)\r\n china_data['dead'] = china_data['total'].map(dead)\r\n china_data['heal'] = china_data['total'].map(heal)\r\n china_data['addconfirm'] = china_data['today'].map(confirm)\r\n #['addsuspect'] = china_data['today'].map(suspect)\r\n #china_data['adddead'] = china_data['today'].map(dead)\r\n #china_data['addheal'] = china_data['today'].map(heal)\r\n china_data = china_data[[\"province\",\"city\",\"confirm\",\"suspect\",\"dead\",\"heal\",\"addconfirm\"]]\r\n china_data.head()\r\n\r\n\r\n\r\n total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) #设置主题,和画布大小\r\n total_pie.add(\"\",[list(z) for z in zip(chinaTotal.keys(), chinaTotal.values())],\r\n center=[\"50%\", \"70%\"], #图的位置\r\n radius=[50, 80]) #内外径大小\r\n total_pie.set_global_opts(\r\n title_opts=opts.TitleOpts(title=\"全国总量\",subtitle=(\"截止\"+lastUpdateTime)))\r\n total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter=\"{c}\")) #标签格式\r\n total_pie.render_notebook()\r\n\r\n totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) #设置主题,和画布大小\r\n totaladd_pie.add(\"\",[list(z) for z in zip(chinaAdd.keys(), chinaAdd.values())],\r\n center=[\"50%\", \"50%\"],\r\n radius=[50, 80])\r\n totaladd_pie.set_global_opts(\r\n 
title_opts=opts.TitleOpts(title=\"昨日新增\"))\r\n totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter=\"{c}\")) #标签格式\r\n totaladd_pie.render_notebook()\r\n\r\n area_data = china_data.groupby(\"province\")[\"confirm\"].sum().reset_index()\r\n area_data.columns = [\"province\",\"confirm\"]\r\n area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))\r\n area_map.add(\"\",[list(z) for z in zip(list(area_data[\"province\"]), list(area_data[\"confirm\"]))], \"china\",is_map_symbol_show=False)\r\n area_map.set_global_opts(title_opts=opts.TitleOpts(title=\"2019_nCoV中国疫情地图\"),visualmap_opts=opts.VisualMapOpts(is_piecewise=True,\r\n pieces = [\r\n {\"min\": 1001 , \"label\": '>1000',\"color\": \"#893448\"}, #不指定 max,表示 max 为无限大\r\n {\"min\": 500, \"max\": 1000, \"label\": '500-1000',\"color\": \"#ff585e\"},\r\n {\"min\": 101, \"max\": 499, \"label\": '101-499',\"color\": \"#fb8146\"},\r\n {\"min\": 10, \"max\": 100, \"label\": '10-100',\"color\": \"#ffb248\"},\r\n {\"min\": 0, \"max\": 9, \"label\": '0-9',\"color\" : \"#fff2d1\" }]))\r\n area_map.render_notebook()\r\n\r\n\r\n page = Page()\r\n page.add(total_pie)\r\n page.add(totaladd_pie)\r\n page.add(area_map)\r\n\r\n\r\n \"\"\"\r\n 绘制世界地图\r\n 遇到一个很神奇的问题:\r\n 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据\r\n :return:\r\n \"\"\"\r\n\r\n # 修复注释中的问题,原因是 confirmed_count 中的 int 是 numpy 的 int ,需转化为 python 中的 int\r\n # 感谢公众号的 @李康伟 同学提出\r\n countrys_names, confirmed_count = read_csv()\r\n confirmed_count_list = []\r\n for item in confirmed_count:\r\n confirmed_count_list.append(int(item))\r\n\r\n # countrys_names = ['United States', 'Brazil', 'Russia', 'Spain', 'United Kingdom', 'Italy', 'France', 'Germany', 'Turkey', 'Iran', 'India', 'Peru', 'Canada', 'Saudi Arabia', 'Mexico', 'Chile', 'Belgium', 'Pakistan', 'Netherlands', 'Qatar', 'Ecuador', 'Belarus', 'Sweden', 'Bangladesh', 'Singapore Rep.', 'Switzerland', 'Portugal', 'United Arab Emirates', 'Ireland', 'Indonesia', 'South Africa', 'Poland', 'Ukraine', 'Kuwait', 
'Colombia', 'Romania', 'Israel', 'Japan', 'Egypt', 'Austria', 'Dominican Rep.', 'Philippines', 'Denmark', 'Argentina', 'Korea', 'Serbia', 'Panama', 'Afghanistan', 'Czech Rep.', 'Norway', 'Kazakhstan', 'Algeria', 'Nigeria', 'Morocco', 'Oman', 'Malaysia', 'Australia', 'Moldova', 'Ghana', 'Finland', 'Armenia', 'Bolivia', 'Cameroon', 'Iraq', 'Luxembourg', 'Azerbaijan', 'Honduras', 'Hungary', 'Sudan', 'Guinea', 'Uzbekistan', 'Guatemala', 'Thailand', 'Senegal', 'Greece', 'Tajikistan', 'Bulgaria', \"Côte d'Ivoire\", 'Djibouti', 'Croatia', 'Gabon', 'Cuba', 'Estonia', 'El Salvador', 'Iceland', 'Lithuania', 'Somalia', 'New Zealand', 'Slovakia', 'Slovenia', 'Kyrgyzstan', 'Kenya', 'Guinea Bissau', 'Lebanon', 'Sri Lanka', 'Tunisia', 'Latvia', 'Mali', 'Venezuela', 'Albania', 'Eq. Guinea', 'Niger', 'Cyprus', 'Zambia', 'Costa Rica', 'Haiti', 'Paraguay', 'Burkina Faso', 'Uruguay', 'Georgia', 'Jordan', 'Chad', 'Sierra Leone', 'Nepal', 'Jamaica', 'Tanzania', 'Ethiopia', 'Madagascar', 'Palestine', 'Togo', 'Vietnam', 'Rwanda', 'Montenegro', 'Nicaragua', 'Liberia', 'Swaziland', 'Mauritania', 'Yemen', 'Myanmar', 'Uganda', 'Mozambique', 'Mongolia', 'Brunei', 'Benin', 'Guyana', 'Cambodia', 'The Bahamas', 'Malawi', 'Libya', 'Syria', 'Angola', 'Zimbabwe', 'Burundi', 'Eritrea', 'Botswana', 'Gambia', 'Bhutan', 'East Timor', 'Namibia', 'Lao PDR', 'Fiji', 'Belize', 'Suriname', 'Papua New Guinea', 'Lesotho']\r\n # \r\n # confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 
1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2]\r\n\r\n\r\n c = (\r\n Map()\r\n .add(\r\n \"确诊人数\",\r\n [list(z) for z in zip(countrys_names, confirmed_count_list)],\r\n is_map_symbol_show=False,\r\n maptype=\"world\",\r\n label_opts=opts.LabelOpts(is_show=False),\r\n itemstyle_opts=opts.ItemStyleOpts(color=\"rgb(49,60,72)\")\r\n )\r\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=\"全球 2019-nCoV 地图\"),\r\n visualmap_opts=opts.VisualMapOpts(max_=1700000),\r\n )\r\n #.render(\"map_world.html\")\r\n )\r\n page.add(c)\r\n page.render('covid-19 中国和世界数据.html')\r\n\r\n\r\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# Script: plot sin(x) together with leading terms of its Maclaurin expansion.
# NOTE(review): this script has several defects to confirm/fix before running:
#   - `plt` is used throughout but matplotlib.pyplot is never imported (NameError).
#   - `y2` is plotted (twice) before it is defined below (NameError).
#   - the `from numpy import sin, linspace` import is duplicated.
import sys
# Make a machine-specific Anaconda site-packages directory importable.
sys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')
from numpy import sin, linspace
x = linspace(0, 4, 101)  # 101 sample points on [0, 4]
y = sin(x)  # reference curve; NOTE(review): computed but never plotted below
from numpy import sin, linspace  # NOTE(review): duplicate of the import above
plt.grid()  # NOTE(review): `plt` undefined — missing `import matplotlib.pyplot as plt`
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')
plt.plot(x, y2)  # NOTE(review): `y2` is not defined until further down — NameError
plt.plot(x, y2, color = "#530000")
y1=x  # first Maclaurin term of sin(x): x
plt.plot(x, y1, color = "#530000")
y2 = y1 - x*x*x/(1*2*3)  # two-term expansion: x - x^3/3!
plt.plot(x, y2, color = "#530000")
plt.show()
|
normal
|
{
"blob_id": "1dcea61908753777604d99235407981e89c3b9d4",
"index": 4452,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\n<mask token>\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color='#530000')\n<mask token>\nplt.plot(x, y1, color='#530000')\n<mask token>\nplt.plot(x, y2, color='#530000')\nplt.show()\n",
"step-3": "<mask token>\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\n<mask token>\nx = linspace(0, 4, 101)\ny = sin(x)\n<mask token>\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color='#530000')\ny1 = x\nplt.plot(x, y1, color='#530000')\ny2 = y1 - x * x * x / (1 * 2 * 3)\nplt.plot(x, y2, color='#530000')\nplt.show()\n",
"step-4": "import sys\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\nfrom numpy import sin, linspace\nx = linspace(0, 4, 101)\ny = sin(x)\nfrom numpy import sin, linspace\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color='#530000')\ny1 = x\nplt.plot(x, y1, color='#530000')\ny2 = y1 - x * x * x / (1 * 2 * 3)\nplt.plot(x, y2, color='#530000')\nplt.show()\n",
"step-5": "import sys\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\n\nfrom numpy import sin, linspace\nx = linspace(0, 4, 101)\ny = sin(x)\n\nfrom numpy import sin, linspace\nplt.grid()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')\nplt.plot(x, y2)\nplt.plot(x, y2, color = \"#530000\")\n\ny1=x\nplt.plot(x, y1, color = \"#530000\")\n\ny2 = y1 - x*x*x/(1*2*3)\nplt.plot(x, y2, color = \"#530000\")\n\nplt.show()\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import unittest
from pathlib import Path
from deepdiff import DeepDiff
from electricitymap.contrib import config
CONFIG_DIR = Path(__file__).parent.parent.joinpath("config").resolve()
class ConfigTestcase(unittest.TestCase):
    """Tests for config.generate_zone_neighbours over various zone/exchange layouts."""

    def test_generate_zone_neighbours_two_countries(self):
        """Two plain countries linked by one exchange become mutual neighbours."""
        exchange_cfg = {"DE->FR": {"parsers": {"exchange": "source"}}}
        zone_cfg = {"DE": {}, "FR": {}}
        neighbours = config.generate_zone_neighbours(zone_cfg, exchange_cfg)
        self.assertDictEqual(neighbours, {"DE": ["FR"], "FR": ["DE"]})

    def test_generate_zone_neighbours_one_country_one_subzone(self):
        """A country exchanging with another country's subzone neighbours the subzone."""
        exchange_cfg = {"DE->SE-SE4": {"parsers": {"exchange": "source"}}}
        zone_cfg = {
            "DE": {},
            "SE": {"subZoneNames": ["SE-SE4"]},
            "SE-SE4": {},
        }
        neighbours = config.generate_zone_neighbours(zone_cfg, exchange_cfg)
        self.assertDictEqual(neighbours, {"DE": ["SE-SE4"], "SE-SE4": ["DE"]})

    def test_generate_zone_neighbours_two_subzones(self):
        """Subzone-to-subzone exchanges map each subzone to its sorted partner list."""
        # All four exchanges share the same parser config; build them in one pass.
        exchange_cfg = {
            key: {"parsers": {"exchange": "source"}}
            for key in (
                "NO-NO1->SE-SE3",
                "NO-NO3->SE-SE2",
                "NO-NO4->SE-SE1",
                "NO-NO4->SE-SE2",
            )
        }
        zone_cfg = {
            "NO": {"subZoneNames": ["NO-NO1", "NO-NO2", "NO-NO3", "NO-NO4", "NO-NO5"]},
            "NO-NO1": {},
            "NO-NO2": {},
            "NO-NO3": {},
            "NO-NO4": {},
            "NO-NO5": {},
            "SE": {"subZoneNames": ["SE-SE1", "SE-SE2", "SE-SE3", "SE-SE4"]},
            "SE-SE1": {},
            "SE-SE2": {},
            "SE-SE3": {},
            "SE-SE4": {},
        }
        expected = {
            "NO-NO1": ["SE-SE3"],
            "NO-NO3": ["SE-SE2"],
            "NO-NO4": ["SE-SE1", "SE-SE2"],
            "SE-SE1": ["NO-NO4"],
            "SE-SE2": ["NO-NO3", "NO-NO4"],
            "SE-SE3": ["NO-NO1"],
        }
        neighbours = config.generate_zone_neighbours(zone_cfg, exchange_cfg)
        self.assertDictEqual(neighbours, expected)

    def test_generate_zone_neighbours_two_subzones_from_same(self):
        """Two subzones of the same country can still be neighbours of each other."""
        exchange_cfg = {"SE-SE1->SE-SE2": {"parsers": {"exchange": "source"}}}
        zone_cfg = {
            "SE": {"subZoneNames": ["SE-SE1", "SE-SE2", "SE-SE3", "SE-SE4"]},
            "SE-SE1": {},
            "SE-SE2": {},
            "SE-SE3": {},
            "SE-SE4": {},
        }
        neighbours = config.generate_zone_neighbours(zone_cfg, exchange_cfg)
        self.assertDictEqual(neighbours, {"SE-SE1": ["SE-SE2"], "SE-SE2": ["SE-SE1"]})

    def test_generate_zone_neighbours_GB(self):
        """GB's islands are separate zones (not subzones), so GB->GB-NIR and
        GB->GB-ORK are valid exchanges and make them direct neighbours of GB."""
        exchange_cfg = {
            key: {"parsers": {"exchange": "source"}}
            for key in ("GB->GB-NIR", "GB->GB-ORK")
        }
        zone_cfg = {"GB": {}, "GB-NIR": {}, "GB-ORK": {}}
        neighbours = config.generate_zone_neighbours(zone_cfg, exchange_cfg)
        self.assertDictEqual(
            neighbours,
            {"GB": ["GB-NIR", "GB-ORK"], "GB-NIR": ["GB"], "GB-ORK": ["GB"]},
        )

    def test_generate_zone_neighbours_no_exchange_parser(self):
        """An exchange without an exchange parser contributes no neighbours."""
        exchange_cfg = {"DE->FR": {"parsers": {}}}
        zone_cfg = {"DE": {}, "FR": {}}
        neighbours = config.generate_zone_neighbours(zone_cfg, exchange_cfg)
        self.assertDictEqual(neighbours, {})

    def test_ZONE_NEIGHBOURS(self):
        """Smoke test against the real config: DK-DK1 must have several neighbours."""
        neighbours = config.generate_zone_neighbours(
            config.ZONES_CONFIG, config.EXCHANGES_CONFIG
        )
        self.assertIn("DK-DK1", neighbours.keys())
        dk_neighbours = neighbours["DK-DK1"]
        self.assertGreater(
            len(dk_neighbours), 1, "expected a few neighbours for DK-DK1"
        )
if __name__ == "__main__":
    # buffer=True captures each test's stdout/stderr and shows it only on failure.
    unittest.main(buffer=True)
|
normal
|
{
"blob_id": "22b8ecfecc0e76d758f14dea865a426db56c6343",
"index": 3538,
"step-1": "<mask token>\n\n\nclass ConfigTestcase(unittest.TestCase):\n <mask token>\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': ['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = 
config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConfigTestcase(unittest.TestCase):\n\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': 
['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\n<mask token>\n",
"step-3": "<mask token>\nCONFIG_DIR = Path(__file__).parent.parent.joinpath('config').resolve()\n\n\nclass ConfigTestcase(unittest.TestCase):\n\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n 
self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': ['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\nif __name__ == '__main__':\n unittest.main(buffer=True)\n",
"step-4": "import json\nimport unittest\nfrom pathlib import Path\nfrom deepdiff import DeepDiff\nfrom electricitymap.contrib import config\nCONFIG_DIR = Path(__file__).parent.parent.joinpath('config').resolve()\n\n\nclass ConfigTestcase(unittest.TestCase):\n\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 
'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': ['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\nif __name__ == '__main__':\n unittest.main(buffer=True)\n",
"step-5": "import json\nimport unittest\nfrom pathlib import Path\n\nfrom deepdiff import DeepDiff\n\nfrom electricitymap.contrib import config\n\nCONFIG_DIR = Path(__file__).parent.parent.joinpath(\"config\").resolve()\n\n\nclass ConfigTestcase(unittest.TestCase):\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {\n \"DE->FR\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"DE\": {},\n \"FR\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {\"DE\": [\"FR\"], \"FR\": [\"DE\"]})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {\n \"DE->SE-SE4\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"DE\": {},\n \"SE\": {\n \"subZoneNames\": [\"SE-SE4\"],\n },\n \"SE-SE4\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {\"DE\": [\"SE-SE4\"], \"SE-SE4\": [\"DE\"]})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {\n \"NO-NO1->SE-SE3\": {\"parsers\": {\"exchange\": \"source\"}},\n \"NO-NO3->SE-SE2\": {\"parsers\": {\"exchange\": \"source\"}},\n \"NO-NO4->SE-SE1\": {\"parsers\": {\"exchange\": \"source\"}},\n \"NO-NO4->SE-SE2\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"NO\": {\n \"subZoneNames\": [\"NO-NO1\", \"NO-NO2\", \"NO-NO3\", \"NO-NO4\", \"NO-NO5\"],\n },\n \"NO-NO1\": {},\n \"NO-NO2\": {},\n \"NO-NO3\": {},\n \"NO-NO4\": {},\n \"NO-NO5\": {},\n \"SE\": {\n \"subZoneNames\": [\"SE-SE1\", \"SE-SE2\", \"SE-SE3\", \"SE-SE4\"],\n },\n \"SE-SE1\": {},\n \"SE-SE2\": {},\n \"SE-SE3\": {},\n \"SE-SE4\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(\n zone_neighbours,\n {\n \"NO-NO1\": [\"SE-SE3\"],\n \"NO-NO3\": [\"SE-SE2\"],\n \"NO-NO4\": [\"SE-SE1\", \"SE-SE2\"],\n \"SE-SE1\": [\"NO-NO4\"],\n \"SE-SE2\": [\"NO-NO3\", \"NO-NO4\"],\n \"SE-SE3\": 
[\"NO-NO1\"],\n },\n )\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {\n \"SE-SE1->SE-SE2\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"SE\": {\n \"subZoneNames\": [\"SE-SE1\", \"SE-SE2\", \"SE-SE3\", \"SE-SE4\"],\n },\n \"SE-SE1\": {},\n \"SE-SE2\": {},\n \"SE-SE3\": {},\n \"SE-SE4\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(\n zone_neighbours,\n {\"SE-SE1\": [\"SE-SE2\"], \"SE-SE2\": [\"SE-SE1\"]},\n )\n\n def test_generate_zone_neighbours_GB(self):\n # That's an interesting case as GB has islands, which are not subzones\n # It means that GB->GB-NIR are valid exchanges and that\n # GB and GB-NIR are neighbours\n exchanges = {\n \"GB->GB-NIR\": {\"parsers\": {\"exchange\": \"source\"}},\n \"GB->GB-ORK\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"GB\": {},\n \"GB-NIR\": {},\n \"GB-ORK\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(\n zone_neighbours,\n {\"GB\": [\"GB-NIR\", \"GB-ORK\"], \"GB-NIR\": [\"GB\"], \"GB-ORK\": [\"GB\"]},\n )\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {\n \"DE->FR\": {\"parsers\": {}},\n }\n zones = {\n \"DE\": {},\n \"FR\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(\n config.ZONES_CONFIG, config.EXCHANGES_CONFIG\n )\n self.assertIn(\"DK-DK1\", zone_neighbours.keys())\n dk_neighbours = zone_neighbours[\"DK-DK1\"]\n\n self.assertGreater(\n len(dk_neighbours), 1, \"expected a few neighbours for DK-DK1\"\n )\n\n\nif __name__ == \"__main__\":\n unittest.main(buffer=True)\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
# -*- coding: utf-8 -*-
"""TODO
"""
import logging
import numpy
import evo.gp.support
import evo.sr
import evo.utils.stats
class RegressionFitness(evo.Fitness):
    """Fitness of symbolic-regression individuals on a training data set.

    A single-gene individual is evaluated on ``train_inputs`` and scored
    against ``train_output`` with the configured error measure (R2, MSE,
    MAE or worst-case absolute error).  Exceptions raised during
    evaluation are mapped to the configured ``error_fitness`` value.
    """

    LOG = logging.getLogger(__name__ + '.RegressionFitness')

    def __init__(self, train_inputs, train_output, error_fitness,
                 handled_errors, stats: evo.utils.stats.Stats=None,
                 store_bsfs: bool=True,
                 fitness_measure: evo.sr.ErrorMeasure=evo.sr.ErrorMeasure.R2):
        """
        :param train_inputs: inputs the individuals are evaluated on
        :param train_output: target values; converted to a numpy array
        :param error_fitness: fitness assigned when evaluation raises one
            of the handled errors
        :param handled_errors: additional exception types (besides
            ``evo.UnevaluableError``) that map to ``error_fitness``
        :param stats: optional statistics collector
        :param store_bsfs: passed to the base class (store best-so-far)
        :param fitness_measure: the error measure used as the fitness value
        """
        super().__init__(store_bsfs)
        self.train_inputs = train_inputs
        # numpy.asarray avoids copying when train_output already is an
        # ndarray; numpy.array(..., copy=False) raises under NumPy >= 2.0
        # whenever a copy would be required.
        self.train_output = numpy.asarray(train_output)
        # Total sum of squares -- the denominator of the R2 measure.
        self.ssw = numpy.sum(
            (self.train_output - self.train_output.mean()) ** 2)
        self.error_fitness = error_fitness
        self.errors = tuple([evo.UnevaluableError] + handled_errors)
        self.stats = stats
        self.fitness_measure = fitness_measure

    def evaluate_individual(self, individual: evo.gp.support.ForestIndividual,
                            context=None):
        """Evaluate ``individual``, set its fitness and return it.

        Exceptions listed in ``self.errors`` are logged and translated to
        ``self.error_fitness`` instead of propagating.
        """
        assert individual.genes_num == 1
        RegressionFitness.LOG.debug(
            'Evaluating individual %s in context %s', individual.__str__(),
            str(context))

        try:
            output = self.get_eval(individual, self.train_inputs)
            fitness = self.get_error(output, individual)
            individual.set_fitness(fitness)
        except self.errors:
            RegressionFitness.LOG.debug(
                'Exception occurred during evaluation, assigning fitness %f',
                self.error_fitness, exc_info=True)
            fitness = self.error_fitness
            individual.set_fitness(fitness)
        return individual.get_fitness()

    def compare(self, i1: evo.gp.support.ForestIndividual,
                i2: evo.gp.support.ForestIndividual, context=None):
        """Compare two individuals by their already-assigned fitness.

        :raises ValueError: if either individual has no fitness
        :return: negative if ``i1`` is better, positive if ``i2`` is
            better, zero if they are equally fit
        """
        f1 = i1.get_fitness()
        f2 = i2.get_fitness()
        if f1 is None and f2 is not None:
            raise ValueError('First individual has no fitness.')
        if f1 is not None and f2 is None:
            raise ValueError('Second individual has no fitness.')
        if f1 is None and f2 is None:
            raise ValueError('Neither individual has fitness.')

        return self.fitness_cmp(f1, f2)

    def get_eval(self, individual: evo.gp.support.ForestIndividual,
                 args):
        """Evaluate the individual's (single) gene on ``args``."""
        return individual.genotype[0].eval(args=args)

    def get_error(self, output, individual: evo.gp.support.ForestIndividual):
        """Compute all error measures, attach them to ``individual`` as
        data, and return the one selected by ``self.fitness_measure``.
        """
        e = self.train_output - output
        ae = numpy.abs(e)
        sse = e.dot(e)
        r2 = 1 - sse / self.ssw
        # len(e) replaces numpy.alen (deprecated in NumPy 1.18, removed in
        # 1.23); numpy.alen(a) was defined as len(numpy.asarray(a)).
        n = len(e)
        mse = sse / n
        mae = numpy.sum(ae) / n
        worst_case_ae = ae.max()
        individual.set_data('R2', r2)
        individual.set_data('MSE', mse)
        individual.set_data('MAE', mae)
        individual.set_data('WORST_CASE_AE', worst_case_ae)
        if self.fitness_measure is evo.sr.ErrorMeasure.R2:
            return r2
        if self.fitness_measure is evo.sr.ErrorMeasure.MSE:
            return mse
        if self.fitness_measure is evo.sr.ErrorMeasure.MAE:
            return mae
        if self.fitness_measure is evo.sr.ErrorMeasure.WORST_CASE_AE:
            return worst_case_ae
        raise ValueError('Invalid value of fitness_measure.')

    def fitness_cmp(self, f1, f2):
        """Order two raw fitness values.

        R2 is maximised; every other measure is minimised.  Returns -1 if
        ``f1`` is better, 1 if ``f2`` is better, 0 on a tie.
        """
        if self.fitness_measure is evo.sr.ErrorMeasure.R2:
            if f1 > f2:
                return -1
            if f1 < f2:
                return 1
        else:
            if f1 < f2:
                return -1
            if f1 > f2:
                return 1
        return 0
def full_model_str(individual: evo.gp.support.ForestIndividual,
                   **kwargs) -> str:
    """Render all genes of ``individual`` in infix notation, joined by ``+``.

    Pass ``newline_genes=True`` to place each gene on its own line; all
    keyword arguments (including ``newline_genes``) are forwarded to each
    gene's ``infix`` method.
    """
    parts = ['{}'.format(gene.infix(**kwargs))
             for gene in individual.genotype]
    separator = '\n+ ' if kwargs.get('newline_genes', False) else ' + '
    return separator.join(parts)
|
normal
|
{
"blob_id": "e53d4bb853eb54e4dfedf7126480e2c3e1af1378",
"index": 2825,
"step-1": "<mask token>\n\n\nclass RegressionFitness(evo.Fitness):\n <mask token>\n\n def __init__(self, train_inputs, train_output, error_fitness,\n handled_errors, stats: evo.utils.stats.Stats=None, store_bsfs: bool\n =True, fitness_measure: evo.sr.ErrorMeasure=evo.sr.ErrorMeasure.R2):\n super().__init__(store_bsfs)\n self.train_inputs = train_inputs\n self.train_output = numpy.array(train_output, copy=False)\n self.ssw = numpy.sum((self.train_output - self.train_output.mean()) **\n 2)\n self.error_fitness = error_fitness\n self.errors = tuple([evo.UnevaluableError] + handled_errors)\n self.stats = stats\n self.fitness_measure = fitness_measure\n\n def evaluate_individual(self, individual: evo.gp.support.\n ForestIndividual, context=None):\n assert individual.genes_num == 1\n RegressionFitness.LOG.debug('Evaluating individual %s in context %s',\n individual.__str__(), str(context))\n try:\n output = self.get_eval(individual, self.train_inputs)\n fitness = self.get_error(output, individual)\n individual.set_fitness(fitness)\n except self.errors as _:\n RegressionFitness.LOG.debug(\n 'Exception occurred during evaluation, assigning fitness %f',\n self.error_fitness, exc_info=True)\n fitness = self.error_fitness\n individual.set_fitness(fitness)\n return individual.get_fitness()\n <mask token>\n\n def get_eval(self, individual: evo.gp.support.ForestIndividual, args):\n return individual.genotype[0].eval(args=args)\n\n def get_error(self, output, individual: evo.gp.support.ForestIndividual):\n e = self.train_output - output\n ae = numpy.abs(e)\n sse = e.dot(e)\n r2 = 1 - sse / self.ssw\n mse = sse / numpy.alen(e)\n mae = numpy.sum(ae) / numpy.alen(e)\n worst_case_ae = ae.max()\n individual.set_data('R2', r2)\n individual.set_data('MSE', mse)\n individual.set_data('MAE', mae)\n individual.set_data('WORST_CASE_AE', worst_case_ae)\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n return r2\n if self.fitness_measure is evo.sr.ErrorMeasure.MSE:\n return mse\n if 
self.fitness_measure is evo.sr.ErrorMeasure.MAE:\n return mae\n if self.fitness_measure is evo.sr.ErrorMeasure.WORST_CASE_AE:\n return worst_case_ae\n raise ValueError('Invalid value of fitness_measure.')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RegressionFitness(evo.Fitness):\n <mask token>\n\n def __init__(self, train_inputs, train_output, error_fitness,\n handled_errors, stats: evo.utils.stats.Stats=None, store_bsfs: bool\n =True, fitness_measure: evo.sr.ErrorMeasure=evo.sr.ErrorMeasure.R2):\n super().__init__(store_bsfs)\n self.train_inputs = train_inputs\n self.train_output = numpy.array(train_output, copy=False)\n self.ssw = numpy.sum((self.train_output - self.train_output.mean()) **\n 2)\n self.error_fitness = error_fitness\n self.errors = tuple([evo.UnevaluableError] + handled_errors)\n self.stats = stats\n self.fitness_measure = fitness_measure\n\n def evaluate_individual(self, individual: evo.gp.support.\n ForestIndividual, context=None):\n assert individual.genes_num == 1\n RegressionFitness.LOG.debug('Evaluating individual %s in context %s',\n individual.__str__(), str(context))\n try:\n output = self.get_eval(individual, self.train_inputs)\n fitness = self.get_error(output, individual)\n individual.set_fitness(fitness)\n except self.errors as _:\n RegressionFitness.LOG.debug(\n 'Exception occurred during evaluation, assigning fitness %f',\n self.error_fitness, exc_info=True)\n fitness = self.error_fitness\n individual.set_fitness(fitness)\n return individual.get_fitness()\n\n def compare(self, i1: evo.gp.support.ForestIndividual, i2: evo.gp.\n support.ForestIndividual, context=None):\n f1 = i1.get_fitness()\n f2 = i2.get_fitness()\n if f1 is None and f2 is not None:\n raise ValueError('First individual has no fitness.')\n if f1 is not None and f2 is None:\n raise ValueError('Second individual has no fitness.')\n if f1 is None and f2 is None:\n raise ValueError('Neither individual has fitness.')\n return self.fitness_cmp(f1, f2)\n\n def get_eval(self, individual: evo.gp.support.ForestIndividual, args):\n return individual.genotype[0].eval(args=args)\n\n def get_error(self, output, individual: evo.gp.support.ForestIndividual):\n e = self.train_output - output\n 
ae = numpy.abs(e)\n sse = e.dot(e)\n r2 = 1 - sse / self.ssw\n mse = sse / numpy.alen(e)\n mae = numpy.sum(ae) / numpy.alen(e)\n worst_case_ae = ae.max()\n individual.set_data('R2', r2)\n individual.set_data('MSE', mse)\n individual.set_data('MAE', mae)\n individual.set_data('WORST_CASE_AE', worst_case_ae)\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n return r2\n if self.fitness_measure is evo.sr.ErrorMeasure.MSE:\n return mse\n if self.fitness_measure is evo.sr.ErrorMeasure.MAE:\n return mae\n if self.fitness_measure is evo.sr.ErrorMeasure.WORST_CASE_AE:\n return worst_case_ae\n raise ValueError('Invalid value of fitness_measure.')\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RegressionFitness(evo.Fitness):\n <mask token>\n\n def __init__(self, train_inputs, train_output, error_fitness,\n handled_errors, stats: evo.utils.stats.Stats=None, store_bsfs: bool\n =True, fitness_measure: evo.sr.ErrorMeasure=evo.sr.ErrorMeasure.R2):\n super().__init__(store_bsfs)\n self.train_inputs = train_inputs\n self.train_output = numpy.array(train_output, copy=False)\n self.ssw = numpy.sum((self.train_output - self.train_output.mean()) **\n 2)\n self.error_fitness = error_fitness\n self.errors = tuple([evo.UnevaluableError] + handled_errors)\n self.stats = stats\n self.fitness_measure = fitness_measure\n\n def evaluate_individual(self, individual: evo.gp.support.\n ForestIndividual, context=None):\n assert individual.genes_num == 1\n RegressionFitness.LOG.debug('Evaluating individual %s in context %s',\n individual.__str__(), str(context))\n try:\n output = self.get_eval(individual, self.train_inputs)\n fitness = self.get_error(output, individual)\n individual.set_fitness(fitness)\n except self.errors as _:\n RegressionFitness.LOG.debug(\n 'Exception occurred during evaluation, assigning fitness %f',\n self.error_fitness, exc_info=True)\n fitness = self.error_fitness\n individual.set_fitness(fitness)\n return individual.get_fitness()\n\n def compare(self, i1: evo.gp.support.ForestIndividual, i2: evo.gp.\n support.ForestIndividual, context=None):\n f1 = i1.get_fitness()\n f2 = i2.get_fitness()\n if f1 is None and f2 is not None:\n raise ValueError('First individual has no fitness.')\n if f1 is not None and f2 is None:\n raise ValueError('Second individual has no fitness.')\n if f1 is None and f2 is None:\n raise ValueError('Neither individual has fitness.')\n return self.fitness_cmp(f1, f2)\n\n def get_eval(self, individual: evo.gp.support.ForestIndividual, args):\n return individual.genotype[0].eval(args=args)\n\n def get_error(self, output, individual: evo.gp.support.ForestIndividual):\n e = self.train_output - output\n 
ae = numpy.abs(e)\n sse = e.dot(e)\n r2 = 1 - sse / self.ssw\n mse = sse / numpy.alen(e)\n mae = numpy.sum(ae) / numpy.alen(e)\n worst_case_ae = ae.max()\n individual.set_data('R2', r2)\n individual.set_data('MSE', mse)\n individual.set_data('MAE', mae)\n individual.set_data('WORST_CASE_AE', worst_case_ae)\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n return r2\n if self.fitness_measure is evo.sr.ErrorMeasure.MSE:\n return mse\n if self.fitness_measure is evo.sr.ErrorMeasure.MAE:\n return mae\n if self.fitness_measure is evo.sr.ErrorMeasure.WORST_CASE_AE:\n return worst_case_ae\n raise ValueError('Invalid value of fitness_measure.')\n\n def fitness_cmp(self, f1, f2):\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n if f1 > f2:\n return -1\n if f1 < f2:\n return 1\n else:\n if f1 < f2:\n return -1\n if f1 > f2:\n return 1\n return 0\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass RegressionFitness(evo.Fitness):\n LOG = logging.getLogger(__name__ + '.RegressionFitness')\n\n def __init__(self, train_inputs, train_output, error_fitness,\n handled_errors, stats: evo.utils.stats.Stats=None, store_bsfs: bool\n =True, fitness_measure: evo.sr.ErrorMeasure=evo.sr.ErrorMeasure.R2):\n super().__init__(store_bsfs)\n self.train_inputs = train_inputs\n self.train_output = numpy.array(train_output, copy=False)\n self.ssw = numpy.sum((self.train_output - self.train_output.mean()) **\n 2)\n self.error_fitness = error_fitness\n self.errors = tuple([evo.UnevaluableError] + handled_errors)\n self.stats = stats\n self.fitness_measure = fitness_measure\n\n def evaluate_individual(self, individual: evo.gp.support.\n ForestIndividual, context=None):\n assert individual.genes_num == 1\n RegressionFitness.LOG.debug('Evaluating individual %s in context %s',\n individual.__str__(), str(context))\n try:\n output = self.get_eval(individual, self.train_inputs)\n fitness = self.get_error(output, individual)\n individual.set_fitness(fitness)\n except self.errors as _:\n RegressionFitness.LOG.debug(\n 'Exception occurred during evaluation, assigning fitness %f',\n self.error_fitness, exc_info=True)\n fitness = self.error_fitness\n individual.set_fitness(fitness)\n return individual.get_fitness()\n\n def compare(self, i1: evo.gp.support.ForestIndividual, i2: evo.gp.\n support.ForestIndividual, context=None):\n f1 = i1.get_fitness()\n f2 = i2.get_fitness()\n if f1 is None and f2 is not None:\n raise ValueError('First individual has no fitness.')\n if f1 is not None and f2 is None:\n raise ValueError('Second individual has no fitness.')\n if f1 is None and f2 is None:\n raise ValueError('Neither individual has fitness.')\n return self.fitness_cmp(f1, f2)\n\n def get_eval(self, individual: evo.gp.support.ForestIndividual, args):\n return individual.genotype[0].eval(args=args)\n\n def get_error(self, output, individual: 
evo.gp.support.ForestIndividual):\n e = self.train_output - output\n ae = numpy.abs(e)\n sse = e.dot(e)\n r2 = 1 - sse / self.ssw\n mse = sse / numpy.alen(e)\n mae = numpy.sum(ae) / numpy.alen(e)\n worst_case_ae = ae.max()\n individual.set_data('R2', r2)\n individual.set_data('MSE', mse)\n individual.set_data('MAE', mae)\n individual.set_data('WORST_CASE_AE', worst_case_ae)\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n return r2\n if self.fitness_measure is evo.sr.ErrorMeasure.MSE:\n return mse\n if self.fitness_measure is evo.sr.ErrorMeasure.MAE:\n return mae\n if self.fitness_measure is evo.sr.ErrorMeasure.WORST_CASE_AE:\n return worst_case_ae\n raise ValueError('Invalid value of fitness_measure.')\n\n def fitness_cmp(self, f1, f2):\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n if f1 > f2:\n return -1\n if f1 < f2:\n return 1\n else:\n if f1 < f2:\n return -1\n if f1 > f2:\n return 1\n return 0\n\n\ndef full_model_str(individual: evo.gp.support.ForestIndividual, **kwargs\n ) ->str:\n newline_genes = kwargs.get('newline_genes', False)\n strs = []\n for g in individual.genotype:\n strs.append('{}'.format(g.infix(**kwargs)))\n if newline_genes:\n return '\\n+ '.join(strs)\n else:\n return ' + '.join(strs)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"TODO\n\"\"\"\n\nimport logging\n\nimport numpy\n\nimport evo.gp.support\nimport evo.sr\nimport evo.utils.stats\n\n\nclass RegressionFitness(evo.Fitness):\n LOG = logging.getLogger(__name__ + '.RegressionFitness')\n\n def __init__(self, train_inputs, train_output, error_fitness,\n handled_errors, stats: evo.utils.stats.Stats=None,\n store_bsfs: bool=True,\n fitness_measure: evo.sr.ErrorMeasure=evo.sr.ErrorMeasure.R2):\n super().__init__(store_bsfs)\n self.train_inputs = train_inputs\n self.train_output = numpy.array(train_output, copy=False)\n self.ssw = numpy.sum(\n (self.train_output - self.train_output.mean()) ** 2)\n self.error_fitness = error_fitness\n self.errors = tuple([evo.UnevaluableError] + handled_errors)\n self.stats = stats\n self.fitness_measure = fitness_measure\n\n def evaluate_individual(self, individual: evo.gp.support.ForestIndividual,\n context=None):\n assert individual.genes_num == 1\n RegressionFitness.LOG.debug(\n 'Evaluating individual %s in context %s', individual.__str__(),\n str(context))\n\n try:\n output = self.get_eval(individual, self.train_inputs)\n fitness = self.get_error(output, individual)\n individual.set_fitness(fitness)\n except self.errors as _:\n RegressionFitness.LOG.debug(\n 'Exception occurred during evaluation, assigning fitness %f',\n self.error_fitness, exc_info=True)\n fitness = self.error_fitness\n individual.set_fitness(fitness)\n return individual.get_fitness()\n\n def compare(self, i1: evo.gp.support.ForestIndividual,\n i2: evo.gp.support.ForestIndividual, context=None):\n f1 = i1.get_fitness()\n f2 = i2.get_fitness()\n if f1 is None and f2 is not None:\n raise ValueError('First individual has no fitness.')\n if f1 is not None and f2 is None:\n raise ValueError('Second individual has no fitness.')\n if f1 is None and f2 is None:\n raise ValueError('Neither individual has fitness.')\n\n return self.fitness_cmp(f1, f2)\n\n def get_eval(self, individual: 
evo.gp.support.ForestIndividual,\n args):\n return individual.genotype[0].eval(args=args)\n\n def get_error(self, output, individual: evo.gp.support.ForestIndividual):\n e = self.train_output - output\n ae = numpy.abs(e)\n sse = e.dot(e)\n r2 = 1 - sse / self.ssw\n mse = sse / numpy.alen(e)\n mae = numpy.sum(ae) / numpy.alen(e)\n worst_case_ae = ae.max()\n individual.set_data('R2', r2)\n individual.set_data('MSE', mse)\n individual.set_data('MAE', mae)\n individual.set_data('WORST_CASE_AE', worst_case_ae)\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n return r2\n if self.fitness_measure is evo.sr.ErrorMeasure.MSE:\n return mse\n if self.fitness_measure is evo.sr.ErrorMeasure.MAE:\n return mae\n if self.fitness_measure is evo.sr.ErrorMeasure.WORST_CASE_AE:\n return worst_case_ae\n raise ValueError('Invalid value of fitness_measure.')\n\n def fitness_cmp(self, f1, f2):\n if self.fitness_measure is evo.sr.ErrorMeasure.R2:\n if f1 > f2:\n return -1\n if f1 < f2:\n return 1\n else:\n if f1 < f2:\n return -1\n if f1 > f2:\n return 1\n return 0\n\n\ndef full_model_str(individual: evo.gp.support.ForestIndividual,\n **kwargs) -> str:\n newline_genes = kwargs.get('newline_genes', False)\n strs = []\n for g in individual.genotype:\n strs.append('{}'.format(g.infix(**kwargs)))\n if newline_genes:\n return '\\n+ '.join(strs)\n else:\n return ' + '.join(strs)\n",
"step-ids": [
5,
6,
7,
9,
11
]
}
|
[
5,
6,
7,
9,
11
] |
' a test module '
__author__ = 'Aaron Jiang'
import sys
def test():
args = sys.argv
if len(args) == 1:
print('Hello World')
elif len(args) == 2:
print('Hello, %s!' % args[1])
else:
print('TOO MANY ARGUMENTS!')
if __name__ == '__main__':
test()
class Test():
count = 0
print('called ', count)
def __init__(self, name):
self.__name = name
Test.count += 1
t1 = Test('Aaron')
print(t1.count)
Test.count = 10
t2 = Test('Aaron2')
print(t2.count)
class Screen:
@property
def width(self):
return self._width
@width.setter
def width(self, width):
self._width = width
@property
def height(self):
return self.__height
@height.setter
def height(self, height):
self.__height = height
@property
def resolution(self):
return self._width * self.__height
sc = Screen()
sc.width = 1024
sc.height = 1
print(sc.resolution)
class Chain(object):
def __init__(self, path=''):
self._path = path
def __getattr__(self, path):
return Chain('%s/%s' % (self._path, path))
def __str__(self):
return self._path
__repr__ = __str__
print(Chain('/nan').status.user.timeline.list)
|
normal
|
{
"blob_id": "ececcf40005054e26e21152bcb5e68a1bce33e88",
"index": 7947,
"step-1": "<mask token>\n\n\nclass Test:\n <mask token>\n print('called ', count)\n <mask token>\n\n\n<mask token>\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\n<mask token>\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test:\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\n<mask token>\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\n<mask token>\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test():\n args = sys.argv\n if len(args) == 1:\n print('Hello World')\n elif len(args) == 2:\n print('Hello, %s!' % args[1])\n else:\n print('TOO MANY ARGUMENTS!')\n\n\n<mask token>\n\n\nclass Test:\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\n<mask token>\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\n<mask token>\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\n<mask token>\n",
"step-4": "<mask token>\n__author__ = 'Aaron Jiang'\n<mask token>\n\n\ndef test():\n args = sys.argv\n if len(args) == 1:\n print('Hello World')\n elif len(args) == 2:\n print('Hello, %s!' % args[1])\n else:\n print('TOO MANY ARGUMENTS!')\n\n\nif __name__ == '__main__':\n test()\n\n\nclass Test:\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\nt1 = Test('Aaron')\nprint(t1.count)\nTest.count = 10\nt2 = Test('Aaron2')\nprint(t2.count)\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\nsc = Screen()\nsc.width = 1024\nsc.height = 1\nprint(sc.resolution)\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\nprint(Chain('/nan').status.user.timeline.list)\n",
"step-5": "' a test module '\n\n__author__ = 'Aaron Jiang'\n\nimport sys\n\ndef test():\n args = sys.argv\n if len(args) == 1:\n print('Hello World')\n elif len(args) == 2:\n print('Hello, %s!' % args[1])\n else:\n print('TOO MANY ARGUMENTS!')\n\n\nif __name__ == '__main__':\n test()\n\n\nclass Test():\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\nt1 = Test('Aaron')\nprint(t1.count)\n\nTest.count = 10\n\nt2 = Test('Aaron2')\nprint(t2.count)\n\n\nclass Screen:\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\nsc = Screen()\nsc.width = 1024\nsc.height = 1\nprint(sc.resolution)\n\nclass Chain(object):\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n\n __repr__ = __str__\n\n\nprint(Chain('/nan').status.user.timeline.list)\n",
"step-ids": [
12,
14,
15,
17,
19
]
}
|
[
12,
14,
15,
17,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
path_clusters = snakemake.input[0]
path_clusters = '/'.join(path_clusters.split('/')[:-1]) + '/'
merge_vcf = snakemake.output[0]
ref_genome = snakemake.params[0]
regions = snakemake.params[1]
threads = snakemake.params[2]
vcf_list = []
bam_files = [(path_clusters + bam) for bam in os.listdir(path_clusters) if
bam.endswith('.bam')]
if len(bam_files) > 0:
for bam_file in bam_files:
vcf_file = bam_file + '.vcf'
vcf_list.append(vcf_file)
cmd = (
'./scripts/freebayes-parallel.sh {} {} -f {} {} -C 2 -iXu > {}'
.format(regions, threads, ref_genome, bam_file, vcf_file))
subprocess.call(cmd, shell=True)
if len(vcf_list) > 0:
args_input = ''
for vcf in vcf_list:
args_input += 'I=' + vcf + ' '
cmd = 'java -jar ./software/picard.jar MergeVcfs {} O={}'.format(
args_input, merge_vcf)
subprocess.call(cmd, shell=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import subprocess
import os
import numpy as np
if __name__ == '__main__':
path_clusters = snakemake.input[0]
path_clusters = '/'.join(path_clusters.split('/')[:-1]) + '/'
merge_vcf = snakemake.output[0]
ref_genome = snakemake.params[0]
regions = snakemake.params[1]
threads = snakemake.params[2]
vcf_list = []
bam_files = [(path_clusters + bam) for bam in os.listdir(path_clusters) if
bam.endswith('.bam')]
if len(bam_files) > 0:
for bam_file in bam_files:
vcf_file = bam_file + '.vcf'
vcf_list.append(vcf_file)
cmd = (
'./scripts/freebayes-parallel.sh {} {} -f {} {} -C 2 -iXu > {}'
.format(regions, threads, ref_genome, bam_file, vcf_file))
subprocess.call(cmd, shell=True)
if len(vcf_list) > 0:
args_input = ''
for vcf in vcf_list:
args_input += 'I=' + vcf + ' '
cmd = 'java -jar ./software/picard.jar MergeVcfs {} O={}'.format(
args_input, merge_vcf)
subprocess.call(cmd, shell=True)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 15:21:29 2021
@author: diego
"""
import subprocess
import os
import numpy as np
if __name__ == "__main__":
path_clusters = snakemake.input[0]
path_clusters = "/".join(path_clusters.split("/")[:-1]) + "/"
merge_vcf = snakemake.output[0]
ref_genome = snakemake.params[0]
regions = snakemake.params[1]
threads = snakemake.params[2]
vcf_list = []
bam_files = [path_clusters + bam for bam in os.listdir(path_clusters) if bam.endswith(".bam")]
if len(bam_files) > 0:
for bam_file in bam_files:
vcf_file = bam_file + ".vcf"
vcf_list.append(vcf_file)
cmd = "./scripts/freebayes-parallel.sh {} {} -f {} {} -C 2 -iXu > {}".format(
regions, threads, ref_genome, bam_file, vcf_file)
subprocess.call(cmd, shell = True)
if len(vcf_list) > 0:
args_input = ""
for vcf in vcf_list:
args_input += "I="+vcf+" "
#cmd = "PicardCommandLine MergeVcfs {} O={}".format(args_input, merge_vcf)
cmd = "java -jar ./software/picard.jar MergeVcfs {} O={}".format(args_input, merge_vcf)
subprocess.call(cmd, shell = True)
|
flexible
|
{
"blob_id": "f6d81387f61ac4150cd6279121780b7113517b1e",
"index": 2860,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n path_clusters = snakemake.input[0]\n path_clusters = '/'.join(path_clusters.split('/')[:-1]) + '/'\n merge_vcf = snakemake.output[0]\n ref_genome = snakemake.params[0]\n regions = snakemake.params[1]\n threads = snakemake.params[2]\n vcf_list = []\n bam_files = [(path_clusters + bam) for bam in os.listdir(path_clusters) if\n bam.endswith('.bam')]\n if len(bam_files) > 0:\n for bam_file in bam_files:\n vcf_file = bam_file + '.vcf'\n vcf_list.append(vcf_file)\n cmd = (\n './scripts/freebayes-parallel.sh {} {} -f {} {} -C 2 -iXu > {}'\n .format(regions, threads, ref_genome, bam_file, vcf_file))\n subprocess.call(cmd, shell=True)\n if len(vcf_list) > 0:\n args_input = ''\n for vcf in vcf_list:\n args_input += 'I=' + vcf + ' '\n cmd = 'java -jar ./software/picard.jar MergeVcfs {} O={}'.format(\n args_input, merge_vcf)\n subprocess.call(cmd, shell=True)\n",
"step-3": "<mask token>\nimport subprocess\nimport os\nimport numpy as np\nif __name__ == '__main__':\n path_clusters = snakemake.input[0]\n path_clusters = '/'.join(path_clusters.split('/')[:-1]) + '/'\n merge_vcf = snakemake.output[0]\n ref_genome = snakemake.params[0]\n regions = snakemake.params[1]\n threads = snakemake.params[2]\n vcf_list = []\n bam_files = [(path_clusters + bam) for bam in os.listdir(path_clusters) if\n bam.endswith('.bam')]\n if len(bam_files) > 0:\n for bam_file in bam_files:\n vcf_file = bam_file + '.vcf'\n vcf_list.append(vcf_file)\n cmd = (\n './scripts/freebayes-parallel.sh {} {} -f {} {} -C 2 -iXu > {}'\n .format(regions, threads, ref_genome, bam_file, vcf_file))\n subprocess.call(cmd, shell=True)\n if len(vcf_list) > 0:\n args_input = ''\n for vcf in vcf_list:\n args_input += 'I=' + vcf + ' '\n cmd = 'java -jar ./software/picard.jar MergeVcfs {} O={}'.format(\n args_input, merge_vcf)\n subprocess.call(cmd, shell=True)\n",
"step-4": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 4 15:21:29 2021\n\n@author: diego\n\"\"\"\nimport subprocess\nimport os\nimport numpy as np\n\nif __name__ == \"__main__\": \n\n path_clusters = snakemake.input[0]\n path_clusters = \"/\".join(path_clusters.split(\"/\")[:-1]) + \"/\"\n merge_vcf = snakemake.output[0]\n ref_genome = snakemake.params[0]\n regions = snakemake.params[1]\n threads = snakemake.params[2] \n vcf_list = []\n bam_files = [path_clusters + bam for bam in os.listdir(path_clusters) if bam.endswith(\".bam\")] \n if len(bam_files) > 0:\n for bam_file in bam_files:\n vcf_file = bam_file + \".vcf\"\n vcf_list.append(vcf_file)\n cmd = \"./scripts/freebayes-parallel.sh {} {} -f {} {} -C 2 -iXu > {}\".format(\n regions, threads, ref_genome, bam_file, vcf_file) \n subprocess.call(cmd, shell = True) \n if len(vcf_list) > 0:\n args_input = \"\"\n for vcf in vcf_list:\n args_input += \"I=\"+vcf+\" \" \n #cmd = \"PicardCommandLine MergeVcfs {} O={}\".format(args_input, merge_vcf) \n cmd = \"java -jar ./software/picard.jar MergeVcfs {} O={}\".format(args_input, merge_vcf) \n subprocess.call(cmd, shell = True)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
DEBUG = True
SQLALCHEMY_DATABASE_URI = (
'postgresql://username:password@IPOrDomain/databasename')
SQLALCHEMY_TRACK_MODIFICATIONS = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 2
<|reserved_special_token_1|>
DEBUG = True
SQLALCHEMY_DATABASE_URI = "postgresql://username:password@IPOrDomain/databasename"
SQLALCHEMY_TRACK_MODIFICATIONS = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 2
|
flexible
|
{
"blob_id": "a1b0e72b62abc89d5292f199ec5b6193b544e271",
"index": 7813,
"step-1": "<mask token>\n",
"step-2": "DEBUG = True\nSQLALCHEMY_DATABASE_URI = (\n 'postgresql://username:password@IPOrDomain/databasename')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 2\n",
"step-3": "DEBUG = True\nSQLALCHEMY_DATABASE_URI = \"postgresql://username:password@IPOrDomain/databasename\"\n\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 2\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def sort_position_data(pos, type='A'):
"""Sorts the position data according to player positions.
As the final matrix should contain the player according to their
position starting from left to right from back to front the indexed
ragged array list should be sorted such that the entries match
this format.
Args:
pos: The list with tuples containing the position data and the
playing position.
type: The type of position rankings used by the tracking system.
Type A is default.
Returns:
The sorted list.
"""
ranking_type = __position_ranking[type]
return sorted(pos, key=lambda player: ranking_type[player[2]])
<|reserved_special_token_0|>
def determine_playing_direction(goalie):
""" Determines the teams' playing direction.
Determines the playing direction using
the average position of the goalie.
Args:
goalie: x-y position of goalie
Returns:
either 'l2r': left to right or 'r2l': right to left.
"""
return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'
<|reserved_special_token_0|>
def rescale_playing_coords(position_coords, pitch_dim):
"""Relocates the origin to left-bottom and rescales to [0,10] height/width.
The routine assumes that the origin (0,0) is localized at the width and
length midpoints.
-----------------
| |
|_ |
| | (0,0)
|_| |
| |
| |
-----------------
Args:
position_coords:
pitch_dim:
Returns:
Nothing, the matrix coordinates are scaled in place.
"""
pitch_width = pitch_dim['width']
pitch_length = pitch_dim['length']
position_coords[:, 0::2] += pitch_length / 2
position_coords[:, 1::2] += pitch_width / 2
position_coords[:, 0::2] *= 10.0 / pitch_length
position_coords[:, 1::2] *= 10.0 / pitch_width
def clamp_values(result, vmin=0.0, vmax=10.0):
"""Clamps the position values to [0,10]
Args:
result:
vmin: minimum value
vmax = maximum value
Returns:
None. Matrix is clamped in place.
"""
for entry in result:
for ht in result[entry]:
ht[ht < vmin] = vmin
ht[ht > vmax] = vmax
def run(pos_data, ball_data, match, ranking_type='A'):
"""Driver routine to run all processing steps.
Args:
ranking_type: Specifies which postion_ranking system should be used.
Returns:
"""
roles = ['home', 'guest']
sections = ['1st', '2nd']
result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}
l2r_section = 0
for sec in sections:
home_direction = 'r2l'
for role in roles:
print('Processing: %s-%s...' % (role, sec))
sorted_pos_data = sort_position_data(pos_data[role][sec],
ranking_type)
stitched_data = stitch_position_data(sorted_pos_data, ball_data
[sec != '1st'])
if role == 'home':
home_direction = determine_playing_direction(stitched_data[
:, 0:2])
if home_direction == 'l2r':
switch_playing_direction(stitched_data)
l2r_section = 0 if sec == '1st' else 1
rescale_playing_coords(stitched_data, match['stadium'])
result[role][0 if sec == '1st' else 1] = stitched_data
print('done')
print('Processing ball...')
switch_playing_direction(ball_data[l2r_section][:, 1:3])
for i in [0, 1]:
rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])
result['ball'][0] = ball_data[0][:, 1:3]
result['ball'][1] = ball_data[1][:, 1:3]
print('clamping values.')
clamp_values(result)
print('done.')
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sort_position_data(pos, type='A'):
"""Sorts the position data according to player positions.
As the final matrix should contain the player according to their
position starting from left to right from back to front the indexed
ragged array list should be sorted such that the entries match
this format.
Args:
pos: The list with tuples containing the position data and the
playing position.
type: The type of position rankings used by the tracking system.
Type A is default.
Returns:
The sorted list.
"""
ranking_type = __position_ranking[type]
return sorted(pos, key=lambda player: ranking_type[player[2]])
def stitch_position_data(pos, ball, NO_PLAYERS=11):
"""Puts position data into a single array.
stitch_position_data does not change the ordering of the data and
stitches the position data together as given. Therefore, if the playing
position must be controlled sort_position_data must be called first.
Args:
pos: position data list (indexed ragged array)
ball: list with two matrices (1st and 2nd half)
NO_PLAYERS: default = 11
Returns:
output_fields:
"""
_MISSING_ = -2.0 ** 13
_NO_DIM_ = 2
_POST_LOOK_ = 20
frames = ball[:, 0]
min_frame = min(frames)
max_frame = max(frames)
no_frames = ball.shape[0]
if no_frames != max_frame - min_frame + 1:
raise IndexError("No of ball frames doesn't match")
no_players_input = len(pos)
input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[
1], _MISSING_)
input_fields_clean = ra.drop_expanded_ragged_entries(input_fields,
NO_PLAYERS * _NO_DIM_, _MISSING_)
output_fields = ra.condense_expanded_ragged_array(input_fields,
missing_id=_MISSING_)
return output_fields
def determine_playing_direction(goalie):
""" Determines the teams' playing direction.
Determines the playing direction using
the average position of the goalie.
Args:
goalie: x-y position of goalie
Returns:
either 'l2r': left to right or 'r2l': right to left.
"""
return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'
def switch_playing_direction(position_coords):
"""Switches the position coordinates.
Mirrors the position coordinates either from left to right or vice versa.
The routine assumes that the origin (0,0) is localized at the width and
length midpoints.
-----------------
| |
|_ |
| | (0,0)
|_| |
| |
| |
-----------------
Args:
position_coords: x-y position coordinates of the players.
Returns:
Nothing, the matrix coordinates are flipped in place.
"""
position_coords[:, 0::2] *= -1
def rescale_playing_coords(position_coords, pitch_dim):
"""Relocates the origin to left-bottom and rescales to [0,10] height/width.
The routine assumes that the origin (0,0) is localized at the width and
length midpoints.
-----------------
| |
|_ |
| | (0,0)
|_| |
| |
| |
-----------------
Args:
position_coords:
pitch_dim:
Returns:
Nothing, the matrix coordinates are scaled in place.
"""
pitch_width = pitch_dim['width']
pitch_length = pitch_dim['length']
position_coords[:, 0::2] += pitch_length / 2
position_coords[:, 1::2] += pitch_width / 2
position_coords[:, 0::2] *= 10.0 / pitch_length
position_coords[:, 1::2] *= 10.0 / pitch_width
def clamp_values(result, vmin=0.0, vmax=10.0):
"""Clamps the position values to [0,10]
Args:
result:
vmin: minimum value
vmax = maximum value
Returns:
None. Matrix is clamped in place.
"""
for entry in result:
for ht in result[entry]:
ht[ht < vmin] = vmin
ht[ht > vmax] = vmax
def run(pos_data, ball_data, match, ranking_type='A'):
"""Driver routine to run all processing steps.
Args:
ranking_type: Specifies which postion_ranking system should be used.
Returns:
"""
roles = ['home', 'guest']
sections = ['1st', '2nd']
result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}
l2r_section = 0
for sec in sections:
home_direction = 'r2l'
for role in roles:
print('Processing: %s-%s...' % (role, sec))
sorted_pos_data = sort_position_data(pos_data[role][sec],
ranking_type)
stitched_data = stitch_position_data(sorted_pos_data, ball_data
[sec != '1st'])
if role == 'home':
home_direction = determine_playing_direction(stitched_data[
:, 0:2])
if home_direction == 'l2r':
switch_playing_direction(stitched_data)
l2r_section = 0 if sec == '1st' else 1
rescale_playing_coords(stitched_data, match['stadium'])
result[role][0 if sec == '1st' else 1] = stitched_data
print('done')
print('Processing ball...')
switch_playing_direction(ball_data[l2r_section][:, 1:3])
for i in [0, 1]:
rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])
result['ball'][0] = ball_data[0][:, 1:3]
result['ball'][1] = ball_data[1][:, 1:3]
print('clamping values.')
clamp_values(result)
print('done.')
return result
if __name__ == '__main__':
section = '2nd'
kk = pos_data['home'][section]
kks = sort_position_data(kk)
bb = ball_data[section != '1st']
ss = stitch_position_data(kks, bb)
data_transformed = run(pos_data, ball_data, match)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__position_ranking = {'A': {'TW': 1, 'LV': 2, 'IVL': 3, 'IVZ': 4, 'IVR': 5,
'RV': 6, 'DML': 7, 'DMZ': 8, 'DMR': 9, 'LM': 10, 'HL': 11, 'MZ': 12,
'HR': 13, 'RM': 14, 'OLM': 15, 'ZO': 16, 'ORM': 17, 'HST': 18, 'LA': 19,
'STL': 20, 'STR': 21, 'RA': 22, 'STZ': 23}, 'B': {'G': 1, 'D': 2, 'M':
3, 'A': 4}, 'C': {'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,
'forward': 4}}
def sort_position_data(pos, type='A'):
"""Sorts the position data according to player positions.
As the final matrix should contain the player according to their
position starting from left to right from back to front the indexed
ragged array list should be sorted such that the entries match
this format.
Args:
pos: The list with tuples containing the position data and the
playing position.
type: The type of position rankings used by the tracking system.
Type A is default.
Returns:
The sorted list.
"""
ranking_type = __position_ranking[type]
return sorted(pos, key=lambda player: ranking_type[player[2]])
def stitch_position_data(pos, ball, NO_PLAYERS=11):
"""Puts position data into a single array.
stitch_position_data does not change the ordering of the data and
stitches the position data together as given. Therefore, if the playing
position must be controlled sort_position_data must be called first.
Args:
pos: position data list (indexed ragged array)
ball: list with two matrices (1st and 2nd half)
NO_PLAYERS: default = 11
Returns:
output_fields:
"""
_MISSING_ = -2.0 ** 13
_NO_DIM_ = 2
_POST_LOOK_ = 20
frames = ball[:, 0]
min_frame = min(frames)
max_frame = max(frames)
no_frames = ball.shape[0]
if no_frames != max_frame - min_frame + 1:
raise IndexError("No of ball frames doesn't match")
no_players_input = len(pos)
input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[
1], _MISSING_)
input_fields_clean = ra.drop_expanded_ragged_entries(input_fields,
NO_PLAYERS * _NO_DIM_, _MISSING_)
output_fields = ra.condense_expanded_ragged_array(input_fields,
missing_id=_MISSING_)
return output_fields
def determine_playing_direction(goalie):
""" Determines the teams' playing direction.
Determines the playing direction using
the average position of the goalie.
Args:
goalie: x-y position of goalie
Returns:
either 'l2r': left to right or 'r2l': right to left.
"""
return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'
def switch_playing_direction(position_coords):
"""Switches the position coordinates.
Mirrors the position coordinates either from left to right or vice versa.
The routine assumes that the origin (0,0) is localized at the width and
length midpoints.
-----------------
| |
|_ |
| | (0,0)
|_| |
| |
| |
-----------------
Args:
position_coords: x-y position coordinates of the players.
Returns:
Nothing, the matrix coordinates are flipped in place.
"""
position_coords[:, 0::2] *= -1
def rescale_playing_coords(position_coords, pitch_dim):
"""Relocates the origin to left-bottom and rescales to [0,10] height/width.
The routine assumes that the origin (0,0) is localized at the width and
length midpoints.
-----------------
| |
|_ |
| | (0,0)
|_| |
| |
| |
-----------------
Args:
position_coords:
pitch_dim:
Returns:
Nothing, the matrix coordinates are scaled in place.
"""
pitch_width = pitch_dim['width']
pitch_length = pitch_dim['length']
position_coords[:, 0::2] += pitch_length / 2
position_coords[:, 1::2] += pitch_width / 2
position_coords[:, 0::2] *= 10.0 / pitch_length
position_coords[:, 1::2] *= 10.0 / pitch_width
def clamp_values(result, vmin=0.0, vmax=10.0):
"""Clamps the position values to [0,10]
Args:
result:
vmin: minimum value
vmax = maximum value
Returns:
None. Matrix is clamped in place.
"""
for entry in result:
for ht in result[entry]:
ht[ht < vmin] = vmin
ht[ht > vmax] = vmax
def run(pos_data, ball_data, match, ranking_type='A'):
"""Driver routine to run all processing steps.
Args:
ranking_type: Specifies which postion_ranking system should be used.
Returns:
"""
roles = ['home', 'guest']
sections = ['1st', '2nd']
result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}
l2r_section = 0
for sec in sections:
home_direction = 'r2l'
for role in roles:
print('Processing: %s-%s...' % (role, sec))
sorted_pos_data = sort_position_data(pos_data[role][sec],
ranking_type)
stitched_data = stitch_position_data(sorted_pos_data, ball_data
[sec != '1st'])
if role == 'home':
home_direction = determine_playing_direction(stitched_data[
:, 0:2])
if home_direction == 'l2r':
switch_playing_direction(stitched_data)
l2r_section = 0 if sec == '1st' else 1
rescale_playing_coords(stitched_data, match['stadium'])
result[role][0 if sec == '1st' else 1] = stitched_data
print('done')
print('Processing ball...')
switch_playing_direction(ball_data[l2r_section][:, 1:3])
for i in [0, 1]:
rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])
result['ball'][0] = ball_data[0][:, 1:3]
result['ball'][1] = ball_data[1][:, 1:3]
print('clamping values.')
clamp_values(result)
print('done.')
return result
if __name__ == '__main__':
section = '2nd'
kk = pos_data['home'][section]
kks = sort_position_data(kk)
bb = ball_data[section != '1st']
ss = stitch_position_data(kks, bb)
data_transformed = run(pos_data, ball_data, match)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import print_function
import numpy as np
import footballpy.processing.ragged_array as ra
<|reserved_special_token_0|>
__position_ranking = {'A': {'TW': 1, 'LV': 2, 'IVL': 3, 'IVZ': 4, 'IVR': 5,
'RV': 6, 'DML': 7, 'DMZ': 8, 'DMR': 9, 'LM': 10, 'HL': 11, 'MZ': 12,
'HR': 13, 'RM': 14, 'OLM': 15, 'ZO': 16, 'ORM': 17, 'HST': 18, 'LA': 19,
'STL': 20, 'STR': 21, 'RA': 22, 'STZ': 23}, 'B': {'G': 1, 'D': 2, 'M':
3, 'A': 4}, 'C': {'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,
'forward': 4}}
def sort_position_data(pos, type='A'):
"""Sorts the position data according to player positions.
As the final matrix should contain the player according to their
position starting from left to right from back to front the indexed
ragged array list should be sorted such that the entries match
this format.
Args:
pos: The list with tuples containing the position data and the
playing position.
type: The type of position rankings used by the tracking system.
Type A is default.
Returns:
The sorted list.
"""
ranking_type = __position_ranking[type]
return sorted(pos, key=lambda player: ranking_type[player[2]])
def stitch_position_data(pos, ball, NO_PLAYERS=11):
"""Puts position data into a single array.
stitch_position_data does not change the ordering of the data and
stitches the position data together as given. Therefore, if the playing
position must be controlled sort_position_data must be called first.
Args:
pos: position data list (indexed ragged array)
ball: list with two matrices (1st and 2nd half)
NO_PLAYERS: default = 11
Returns:
output_fields:
"""
_MISSING_ = -2.0 ** 13
_NO_DIM_ = 2
_POST_LOOK_ = 20
frames = ball[:, 0]
min_frame = min(frames)
max_frame = max(frames)
no_frames = ball.shape[0]
if no_frames != max_frame - min_frame + 1:
raise IndexError("No of ball frames doesn't match")
no_players_input = len(pos)
input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[
1], _MISSING_)
input_fields_clean = ra.drop_expanded_ragged_entries(input_fields,
NO_PLAYERS * _NO_DIM_, _MISSING_)
output_fields = ra.condense_expanded_ragged_array(input_fields,
missing_id=_MISSING_)
return output_fields
def determine_playing_direction(goalie):
""" Determines the teams' playing direction.
Determines the playing direction using
the average position of the goalie.
Args:
goalie: x-y position of goalie
Returns:
either 'l2r': left to right or 'r2l': right to left.
"""
return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'
def switch_playing_direction(position_coords):
"""Switches the position coordinates.
Mirrors the position coordinates either from left to right or vice versa.
The routine assumes that the origin (0,0) is localized at the width and
length midpoints.
-----------------
| |
|_ |
| | (0,0)
|_| |
| |
| |
-----------------
Args:
position_coords: x-y position coordinates of the players.
Returns:
Nothing, the matrix coordinates are flipped in place.
"""
position_coords[:, 0::2] *= -1
def rescale_playing_coords(position_coords, pitch_dim):
"""Relocates the origin to left-bottom and rescales to [0,10] height/width.
The routine assumes that the origin (0,0) is localized at the width and
length midpoints.
-----------------
| |
|_ |
| | (0,0)
|_| |
| |
| |
-----------------
Args:
position_coords:
pitch_dim:
Returns:
Nothing, the matrix coordinates are scaled in place.
"""
pitch_width = pitch_dim['width']
pitch_length = pitch_dim['length']
position_coords[:, 0::2] += pitch_length / 2
position_coords[:, 1::2] += pitch_width / 2
position_coords[:, 0::2] *= 10.0 / pitch_length
position_coords[:, 1::2] *= 10.0 / pitch_width
def clamp_values(result, vmin=0.0, vmax=10.0):
"""Clamps the position values to [0,10]
Args:
result:
vmin: minimum value
vmax = maximum value
Returns:
None. Matrix is clamped in place.
"""
for entry in result:
for ht in result[entry]:
ht[ht < vmin] = vmin
ht[ht > vmax] = vmax
def run(pos_data, ball_data, match, ranking_type='A'):
"""Driver routine to run all processing steps.
Args:
ranking_type: Specifies which postion_ranking system should be used.
Returns:
"""
roles = ['home', 'guest']
sections = ['1st', '2nd']
result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}
l2r_section = 0
for sec in sections:
home_direction = 'r2l'
for role in roles:
print('Processing: %s-%s...' % (role, sec))
sorted_pos_data = sort_position_data(pos_data[role][sec],
ranking_type)
stitched_data = stitch_position_data(sorted_pos_data, ball_data
[sec != '1st'])
if role == 'home':
home_direction = determine_playing_direction(stitched_data[
:, 0:2])
if home_direction == 'l2r':
switch_playing_direction(stitched_data)
l2r_section = 0 if sec == '1st' else 1
rescale_playing_coords(stitched_data, match['stadium'])
result[role][0 if sec == '1st' else 1] = stitched_data
print('done')
print('Processing ball...')
switch_playing_direction(ball_data[l2r_section][:, 1:3])
for i in [0, 1]:
rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])
result['ball'][0] = ball_data[0][:, 1:3]
result['ball'][1] = ball_data[1][:, 1:3]
print('clamping values.')
clamp_values(result)
print('done.')
return result
if __name__ == '__main__':
section = '2nd'
kk = pos_data['home'][section]
kks = sort_position_data(kk)
bb = ball_data[section != '1st']
ss = stitch_position_data(kks, bb)
data_transformed = run(pos_data, ball_data, match)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 23:54:17 2015
@author: rein
@license: MIT
@version: 0.1
"""
from __future__ import print_function
import numpy as np
import footballpy.processing.ragged_array as ra
""" Ranking dictionary necessary to determine the column number
of each player.
The type system depends on the type of the raw data.
Type A: Elaborate positioning scheme
Type B: Simple scheme
Type C: Amisco-scheme
"""
# Maps a tracking system's position label to a column rank so players can
# be ordered left-to-right, back-to-front in the stitched matrix.
__position_ranking = {
    # Type A: elaborate scheme (German labels: goalie, back line,
    # defensive/central/offensive midfield, wings, strikers).
    'A': {
        'TW':1, 'LV':2, 'IVL':3, 'IVZ':4, 'IVR':5, 'RV':6,
        'DML':7, 'DMZ':8, 'DMR':9,
        'LM':10, 'HL':11, 'MZ': 12, 'HR':13, 'RM':14,
        'OLM':15, 'ZO':16, 'ORM':17,
        'HST':18, 'LA':19, 'STL':20, 'STR':21, 'RA':22,
        'STZ':23
    },
    # Type B: simple scheme (goalie, defense, midfield, attack).
    'B': {
        'G': 1, 'D': 2, 'M': 3, 'A': 4
    },
    # Type C: Amisco scheme.
    'C': {
        'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,
        'forward': 4
    }
}
def sort_position_data(pos,type='A'):
    """Sorts the position data according to player positions.

    The final matrix should hold the players ordered left-to-right,
    back-to-front, so the indexed ragged array list is sorted by each
    player's rank in the chosen position-ranking scheme.

    Args:
        pos: list of tuples containing the position data; element [2] of
            each tuple is the playing-position label.
        type: which position-ranking scheme the tracking system uses
            ('A' is the default).
    Returns:
        A new, sorted list.
    """
    rank_of = __position_ranking[type]

    def _player_rank(player):
        return rank_of[player[2]]

    return sorted(pos, key=_player_rank)
def stitch_position_data(pos,ball,NO_PLAYERS=11):
    """Puts position data into a single array.

    stitch_position_data does not change the ordering of the data and
    stitches the position data together as given. Therefore, if the playing
    position must be controlled sort_position_data must be called first.

    Args:
        pos: position data list (indexed ragged array)
        ball: ball matrix for one half; column 0 must hold a contiguous
            frame counter.
        NO_PLAYERS: expected number of players per team, default = 11
    Returns:
        output_fields: condensed position matrix aligned to the ball frames.
    Raises:
        IndexError: if the ball frame counter is not contiguous.
    """
    # magic numbers
    _MISSING_ = -2.0**13    # sentinel marking absent samples
    _NO_DIM_ = 2            # x- and y-coordinates
    # end magic numbers

    frames = ball[:,0]
    min_frame = min(frames)
    max_frame = max(frames)
    no_frames = ball.shape[0]
    # sanity check: frames must be gap-free for the expansion below
    if no_frames != (max_frame - min_frame + 1):
        raise IndexError("No of ball frames doesn't match")

    input_fields = ra.expand_indexed_ragged_array(pos, frames,
            lambda x: x[1], _MISSING_)
    input_fields_clean = ra.drop_expanded_ragged_entries(input_fields,
            NO_PLAYERS*_NO_DIM_, _MISSING_)
    # BUG FIX: the cleaned array was computed but never used -- the raw
    # input_fields was condensed instead, so dropped entries were ignored.
    # Condense the cleaned fields as intended.
    output_fields = ra.condense_expanded_ragged_array(input_fields_clean,
            missing_id = _MISSING_)

    return output_fields
def determine_playing_direction(goalie):
    """Determines a team's playing direction from its goalie's position.

    Uses the goalie's average x-coordinate: a goalie sitting in the left
    half of the pitch (negative x, origin at pitch center) implies the
    team attacks left-to-right.

    Args:
        goalie: matrix whose first column holds the goalie's x-coordinates.
    Returns:
        'l2r' for left-to-right, 'r2l' for right-to-left.
    """
    mean_x = np.average(goalie[:, 0])
    if mean_x < 0:
        return 'l2r'
    return 'r2l'
def switch_playing_direction(position_coords):
    """Mirrors the playing direction of the given coordinates in place.

    Negates every x-coordinate (columns 0, 2, 4, ...), flipping the pitch
    around its length midpoint so left-to-right play becomes right-to-left
    and vice versa. Assumes the origin (0,0) lies at the pitch center.

    Args:
        position_coords: matrix of interleaved x-y position coordinates.
    Returns:
        Nothing; the matrix is modified in place.
    """
    position_coords[:, 0::2] = -position_coords[:, 0::2]
def rescale_playing_coords(position_coords,pitch_dim):
    """Relocates the origin to the bottom-left corner and rescales to [0,10].

    Assumes the incoming coordinates use an origin (0,0) at the pitch
    center; after this call x and y both lie in [0,10] with (0,0) at the
    bottom-left corner.

    Args:
        position_coords: matrix of interleaved x-y coordinates.
        pitch_dim: dict with pitch 'width' and 'length' in the same units
            as the coordinates.
    Returns:
        Nothing; the matrix is scaled in place.
    """
    length = pitch_dim['length']
    width = pitch_dim['width']
    xs = position_coords[:, 0::2]   # view on the x-columns
    ys = position_coords[:, 1::2]   # view on the y-columns
    # shift center-origin coordinates to the bottom-left corner
    xs += length / 2
    ys += width / 2
    # normalize both axes to [0, 10]
    xs *= 10.0 / length
    ys *= 10.0 / width
def clamp_values(result,vmin=0.0, vmax=10.0):
    """Clamps the position values to [vmin, vmax].

    Args:
        result: dict mapping entry names to lists of position matrices
            (one matrix per half).
        vmin: minimum value (default 0.0).
        vmax: maximum value (default 10.0).
    Returns:
        None. Each matrix is clamped in place.
    """
    for key in result:
        for half_matrix in result[key]:
            np.clip(half_matrix, vmin, vmax, out=half_matrix)
def run(pos_data,ball_data,match,ranking_type='A'):
    """Driver routine to run all processing steps.

    Per half: sorts the players by playing position, stitches them into a
    single matrix aligned to the ball frames, mirrors the pitch so the home
    team always plays right-to-left, rescales coordinates to [0,10] and
    clamps outliers.

    Args:
        pos_data: dict role -> section -> indexed ragged position list.
        ball_data: two ball matrices (1st and 2nd half); columns 1:3 are
            treated as x-y coordinates -- TODO confirm column layout.
        match: match metadata; match['stadium'] must supply pitch
            'width' and 'length'.
        ranking_type: Specifies which postion_ranking system should be used.
    Returns:
        dict with keys 'home', 'guest' and 'ball', each a two-element list
        holding one coordinate matrix per half.
    """
    roles = ['home','guest']
    sections = ['1st','2nd']
    result = {'home':[0]*2, 'guest':[0]*2, 'ball':[0]*2}
    # index (0 or 1) of the half in which home played left-to-right;
    # the ball data of that half is mirrored below to match the players.
    l2r_section = 0
    # processing player position data first
    for sec in sections:
        home_direction = 'r2l'
        for role in roles:
            print('Processing: %s-%s...' % (role,sec))
            sorted_pos_data = sort_position_data(pos_data[role][sec], ranking_type)
            stitched_data = stitch_position_data(sorted_pos_data,ball_data[sec!='1st'])
            # direction is decided once per half from the home goalie;
            # the guest team (processed second) reuses the same flag so
            # both teams are mirrored consistently.
            if role == 'home':
                home_direction = determine_playing_direction(stitched_data[:,0:2])
            if home_direction == 'l2r':
                switch_playing_direction(stitched_data)
                l2r_section = 0 if sec=='1st' else 1
            rescale_playing_coords(stitched_data,match['stadium'])
            result[role][0 if sec=='1st' else 1] = stitched_data
            print('done')
    # processing ball data
    # NOTE(review): if home never played l2r this still mirrors half 0
    # (the initial value of l2r_section) -- presumably teams switch sides
    # at halftime so exactly one half is l2r; confirm.
    print('Processing ball...')
    switch_playing_direction(ball_data[l2r_section][:,1:3])
    for i in [0,1]:
        rescale_playing_coords(ball_data[i][:,1:3],match['stadium'])
    result['ball'][0] = ball_data[0][:,1:3]
    result['ball'][1] = ball_data[1][:,1:3]
    # correct value ranges: clamp everything into the [0,10] pitch box.
    print('clamping values.')
    clamp_values(result)
    print('done.')
    return result
if __name__ == '__main__':
    # NOTE(review): pos_data, ball_data and match (and teams, per the
    # original note) are not defined in this file -- running this as a
    # script raises NameError. They were presumably populated by a loader
    # in an interactive session; confirm and wire in the actual loader.
    section = '2nd'
    kk = pos_data['home'][section]
    kks = sort_position_data(kk)
    bb = ball_data[section!='1st']
    ss = stitch_position_data(kks,bb)
    data_transformed = run(pos_data,ball_data,match)
|
flexible
|
{
"blob_id": "81ae5bbc8e3e712ee4f54656bc28f385a0b4a29f",
"index": 6059,
"step-1": "<mask token>\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\n<mask token>\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\n<mask token>\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. 
Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' % (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos, ball, NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n _MISSING_ = -2.0 ** 13\n _NO_DIM_ = 2\n _POST_LOOK_ = 20\n frames = ball[:, 0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != max_frame - min_frame + 1:\n raise IndexError(\"No of ball frames doesn't match\")\n no_players_input = len(pos)\n input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[\n 1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields, \n NO_PLAYERS * _NO_DIM_, _MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields,\n missing_id=_MISSING_)\n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n 
Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n position_coords[:, 0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. 
Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' % (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\nif __name__ == '__main__':\n section = '2nd'\n kk = pos_data['home'][section]\n kks = sort_position_data(kk)\n bb = ball_data[section != '1st']\n ss = stitch_position_data(kks, bb)\n data_transformed = run(pos_data, ball_data, match)\n",
"step-3": "<mask token>\n__position_ranking = {'A': {'TW': 1, 'LV': 2, 'IVL': 3, 'IVZ': 4, 'IVR': 5,\n 'RV': 6, 'DML': 7, 'DMZ': 8, 'DMR': 9, 'LM': 10, 'HL': 11, 'MZ': 12,\n 'HR': 13, 'RM': 14, 'OLM': 15, 'ZO': 16, 'ORM': 17, 'HST': 18, 'LA': 19,\n 'STL': 20, 'STR': 21, 'RA': 22, 'STZ': 23}, 'B': {'G': 1, 'D': 2, 'M': \n 3, 'A': 4}, 'C': {'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,\n 'forward': 4}}\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos, ball, NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. 
Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n _MISSING_ = -2.0 ** 13\n _NO_DIM_ = 2\n _POST_LOOK_ = 20\n frames = ball[:, 0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != max_frame - min_frame + 1:\n raise IndexError(\"No of ball frames doesn't match\")\n no_players_input = len(pos)\n input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[\n 1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields, \n NO_PLAYERS * _NO_DIM_, _MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields,\n missing_id=_MISSING_)\n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n position_coords[:, 0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n 
-----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' 
% (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\nif __name__ == '__main__':\n section = '2nd'\n kk = pos_data['home'][section]\n kks = sort_position_data(kk)\n bb = ball_data[section != '1st']\n ss = stitch_position_data(kks, bb)\n data_transformed = run(pos_data, ball_data, match)\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport numpy as np\nimport footballpy.processing.ragged_array as ra\n<mask token>\n__position_ranking = {'A': {'TW': 1, 'LV': 2, 'IVL': 3, 'IVZ': 4, 'IVR': 5,\n 'RV': 6, 'DML': 7, 'DMZ': 8, 'DMR': 9, 'LM': 10, 'HL': 11, 'MZ': 12,\n 'HR': 13, 'RM': 14, 'OLM': 15, 'ZO': 16, 'ORM': 17, 'HST': 18, 'LA': 19,\n 'STL': 20, 'STR': 21, 'RA': 22, 'STZ': 23}, 'B': {'G': 1, 'D': 2, 'M': \n 3, 'A': 4}, 'C': {'goalie': 1, 'defenseman': 2, 'mid-fielder': 3,\n 'forward': 4}}\n\n\ndef sort_position_data(pos, type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos, key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos, ball, NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. 
Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n _MISSING_ = -2.0 ** 13\n _NO_DIM_ = 2\n _POST_LOOK_ = 20\n frames = ball[:, 0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != max_frame - min_frame + 1:\n raise IndexError(\"No of ball frames doesn't match\")\n no_players_input = len(pos)\n input_fields = ra.expand_indexed_ragged_array(pos, frames, lambda x: x[\n 1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields, \n NO_PLAYERS * _NO_DIM_, _MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields,\n missing_id=_MISSING_)\n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:, 0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n position_coords[:, 0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords, pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n 
-----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n position_coords[:, 0::2] += pitch_length / 2\n position_coords[:, 1::2] += pitch_width / 2\n position_coords[:, 0::2] *= 10.0 / pitch_length\n position_coords[:, 1::2] *= 10.0 / pitch_width\n\n\ndef clamp_values(result, vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht < vmin] = vmin\n ht[ht > vmax] = vmax\n\n\ndef run(pos_data, ball_data, match, ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home', 'guest']\n sections = ['1st', '2nd']\n result = {'home': [0] * 2, 'guest': [0] * 2, 'ball': [0] * 2}\n l2r_section = 0\n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' 
% (role, sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec],\n ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data, ball_data\n [sec != '1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[\n :, 0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec == '1st' else 1\n rescale_playing_coords(stitched_data, match['stadium'])\n result[role][0 if sec == '1st' else 1] = stitched_data\n print('done')\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:, 1:3])\n for i in [0, 1]:\n rescale_playing_coords(ball_data[i][:, 1:3], match['stadium'])\n result['ball'][0] = ball_data[0][:, 1:3]\n result['ball'][1] = ball_data[1][:, 1:3]\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n\n\nif __name__ == '__main__':\n section = '2nd'\n kk = pos_data['home'][section]\n kks = sort_position_data(kk)\n bb = ball_data[section != '1st']\n ss = stitch_position_data(kks, bb)\n data_transformed = run(pos_data, ball_data, match)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 23:54:17 2015\n\n@author: rein\n@license: MIT\n@version: 0.1\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport footballpy.processing.ragged_array as ra\n\n\"\"\" Ranking dictionary necessary to determine the column number\n of each player.\n \n The type system depends on the type of the raw data.\n Type A: Elaborate positioning scheme\n Type B: Simple scheme\n Type C: Amisco-scheme\n\"\"\"\n__position_ranking = {\n 'A': {\n 'TW':1, 'LV':2, 'IVL':3, 'IVZ':4, 'IVR':5, 'RV':6,\n 'DML':7, 'DMZ':8, 'DMR':9,\n 'LM':10, 'HL':11, 'MZ': 12, 'HR':13, 'RM':14,\n 'OLM':15, 'ZO':16, 'ORM':17,\n 'HST':18, 'LA':19, 'STL':20, 'STR':21, 'RA':22,\n 'STZ':23\n },\n 'B': {\n 'G': 1, 'D': 2, 'M': 3, 'A': 4 \n },\n 'C': {\n 'goalie': 1, 'defenseman': 2, 'mid-fielder': 3, \n 'forward': 4\n }\n}\n\n\ndef sort_position_data(pos,type='A'):\n \"\"\"Sorts the position data according to player positions.\n\n As the final matrix should contain the player according to their\n position starting from left to right from back to front the indexed\n ragged array list should be sorted such that the entries match\n this format.\n \n Args:\n pos: The list with tuples containing the position data and the\n playing position.\n type: The type of position rankings used by the tracking system. \n Type A is default.\n Returns: \n The sorted list.\n \"\"\"\n ranking_type = __position_ranking[type]\n return sorted(pos,key=lambda player: ranking_type[player[2]])\n\n\ndef stitch_position_data(pos,ball,NO_PLAYERS=11):\n \"\"\"Puts position data into a single array.\n \n stitch_position_data does not change the ordering of the data and\n stitches the position data together as given. 
Therefore, if the playing\n position must be controlled sort_position_data must be called first.\n Args:\n pos: position data list (indexed ragged array)\n ball: list with two matrices (1st and 2nd half)\n NO_PLAYERS: default = 11\n Returns:\n output_fields: \n \"\"\"\n # magic numbers\n _MISSING_ = -2.0**13\n _NO_DIM_ = 2 # x- and y-coordinates\n _POST_LOOK_ = 20\n # end magic numbers\n \n frames = ball[:,0]\n min_frame = min(frames)\n max_frame = max(frames)\n no_frames = ball.shape[0]\n if no_frames != (max_frame - min_frame + 1):\n raise IndexError(\"No of ball frames doesn't match\")\n \n no_players_input = len(pos)\n\n input_fields = ra.expand_indexed_ragged_array(pos, frames, \n lambda x: x[1], _MISSING_)\n input_fields_clean = ra.drop_expanded_ragged_entries(input_fields,NO_PLAYERS*_NO_DIM_,_MISSING_)\n output_fields = ra.condense_expanded_ragged_array(input_fields, missing_id = _MISSING_)\n \n return output_fields\n\n\ndef determine_playing_direction(goalie):\n \"\"\" Determines the teams' playing direction.\n \n Determines the playing direction using\n the average position of the goalie.\n\n Args:\n goalie: x-y position of goalie\n Returns:\n either 'l2r': left to right or 'r2l': right to left.\n \"\"\"\n return 'l2r' if np.average(goalie[:,0]) < 0 else 'r2l'\n\n\ndef switch_playing_direction(position_coords):\n \"\"\"Switches the position coordinates.\n \n Mirrors the position coordinates either from left to right or vice versa.\n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords: x-y position coordinates of the players.\n Returns:\n Nothing, the matrix coordinates are flipped in place.\n \"\"\"\n # just mirrors the x-coordinate in place\n position_coords[:,0::2] *= -1\n\n\ndef rescale_playing_coords(position_coords,pitch_dim):\n \"\"\"Relocates the origin to left-bottom and rescales to [0,10] 
height/width.\n \n The routine assumes that the origin (0,0) is localized at the width and \n length midpoints.\n -----------------\n | |\n |_ |\n | | (0,0)\n |_| |\n | |\n | |\n -----------------\n Args:\n position_coords:\n pitch_dim:\n Returns:\n Nothing, the matrix coordinates are scaled in place.\n \"\"\"\n pitch_width = pitch_dim['width']\n pitch_length = pitch_dim['length']\n # translate to bottom-left corner\n position_coords[:,0::2] += pitch_length/2 # x-coordinates\n position_coords[:,1::2] += pitch_width/2 # y-coordinates\n # rescale to [0,10]\n position_coords[:,0::2] *= 10.0/pitch_length # x-coordinates\n position_coords[:,1::2] *= 10.0/pitch_width # y-coordinates\n\n\ndef clamp_values(result,vmin=0.0, vmax=10.0):\n \"\"\"Clamps the position values to [0,10]\n\n Args:\n result:\n vmin: minimum value\n vmax = maximum value\n Returns:\n None. Matrix is clamped in place.\n \"\"\"\n for entry in result:\n for ht in result[entry]:\n ht[ht<vmin] = vmin\n ht[ht>vmax] = vmax\n\n\ndef run(pos_data,ball_data,match,ranking_type='A'):\n \"\"\"Driver routine to run all processing steps.\n \n Args:\n ranking_type: Specifies which postion_ranking system should be used.\n Returns:\n \"\"\"\n roles = ['home','guest']\n sections = ['1st','2nd']\n result = {'home':[0]*2, 'guest':[0]*2, 'ball':[0]*2}\n \n # switch for l2r switching mode\n l2r_section = 0\n\n # processing player position data first \n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' 
% (role,sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec], ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data,ball_data[sec!='1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[:,0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec=='1st' else 1\n rescale_playing_coords(stitched_data,match['stadium'])\n result[role][0 if sec=='1st' else 1] = stitched_data\n print('done')\n \n # processing ball data\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:,1:3])\n for i in [0,1]:\n rescale_playing_coords(ball_data[i][:,1:3],match['stadium'])\n result['ball'][0] = ball_data[0][:,1:3]\n result['ball'][1] = ball_data[1][:,1:3]\n\n #correct value ranges.\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result\n \n \nif __name__ == '__main__':\n#teams, match, pos_data,ball_data\n section = '2nd'\n kk = pos_data['home'][section] \n kks = sort_position_data(kk)\n bb = ball_data[section!='1st']\n ss = stitch_position_data(kks,bb)\n data_transformed = run(pos_data,ball_data,match)\n",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
from .proxies import Proxies
from .roles import Roles
from .products import Products
from .resourcefiles import ResourceFiles
class Apigee(object):
    """Provides easy access to all Apigee endpoint classes.

    Args:
        org_name (str): Apigee organization name.
        username (str): Account username used to authenticate each endpoint.
        password (str): Account password used to authenticate each endpoint.
        environment: Deployment environment handed to the resource-files
            endpoint. Optional; defaults to None for backward compatibility.
    """

    def __init__(self, org_name, username, password, environment=None):
        self.proxies = Proxies(org_name, username, password)
        self.roles = Roles(org_name, username, password)
        self.products = Products(org_name, username, password)
        # BUG FIX: `environment` was previously an undefined free variable,
        # raising NameError on construction; it is now an explicit,
        # defaulted constructor parameter.
        self.resourcefiles = ResourceFiles(org_name, username, password,
                                           environment)
|
normal
|
{
"blob_id": "656927013d9a0254e2bc4cdf05b7cfd5947feb05",
"index": 7868,
"step-1": "<mask token>\n\n\nclass Apigee(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Apigee(object):\n <mask token>\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-3": "<mask token>\n\n\nclass Apigee(object):\n \"\"\"Provides easy access to all endpoint classes\n\n Args:\n domain (str): Your Auth0 domain, e.g: 'username.auth0.com'\n\n token (str): Management API v2 Token\n \"\"\"\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-4": "from .proxies import Proxies\nfrom .roles import Roles\nfrom .products import Products\nfrom .resourcefiles import ResourceFiles\n\n\nclass Apigee(object):\n \"\"\"Provides easy access to all endpoint classes\n\n Args:\n domain (str): Your Auth0 domain, e.g: 'username.auth0.com'\n\n token (str): Management API v2 Token\n \"\"\"\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class NGram(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@abc.abstractmethod
def load_text(self, text):
pass
def load_ngram(self):
counts = self.empty_count()
c = self.n
while c < len(self.text):
l = self.text[c]
p = '^'.join(self.prev_n(c))
if l:
if p not in counts[l]:
counts[l][p] = 1
else:
counts[l][p] += 1
c += 1
self.counts = counts
<|reserved_special_token_0|>
def prev_n(self, i):
return self.text[i - self.n:i]
<|reserved_special_token_0|>
def generate_sentence(self, length):
c = length
s = []
while c > 0:
if len(s) < self.n:
sampling = self.sample(s)
else:
sampling = self.sample(s[len(s) - self.n:])
s.append(sampling)
c -= 1
return self.SEP.join(s)
<|reserved_special_token_0|>
def distribution(self, previous):
tokens = []
counts = []
for token in self.counts.keys():
count = self.get_count(token, previous)
tokens.append(token)
counts.append(count)
s = sum(counts)
probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))
return tokens, map(probability, counts)
@abc.abstractmethod
def cols(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NGram(object):
<|reserved_special_token_0|>
def __init__(self, n, text):
self.n = n
self.load_text(text)
self.load_ngram()
@abc.abstractmethod
def load_text(self, text):
pass
def load_ngram(self):
counts = self.empty_count()
c = self.n
while c < len(self.text):
l = self.text[c]
p = '^'.join(self.prev_n(c))
if l:
if p not in counts[l]:
counts[l][p] = 1
else:
counts[l][p] += 1
c += 1
self.counts = counts
<|reserved_special_token_0|>
def prev_n(self, i):
return self.text[i - self.n:i]
def empty_count(self):
s = {}
return {c: dict() for c in self.cols()}
def generate_sentence(self, length):
c = length
s = []
while c > 0:
if len(s) < self.n:
sampling = self.sample(s)
else:
sampling = self.sample(s[len(s) - self.n:])
s.append(sampling)
c -= 1
return self.SEP.join(s)
<|reserved_special_token_0|>
def distribution(self, previous):
tokens = []
counts = []
for token in self.counts.keys():
count = self.get_count(token, previous)
tokens.append(token)
counts.append(count)
s = sum(counts)
probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))
return tokens, map(probability, counts)
@abc.abstractmethod
def cols(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NGram(object):
<|reserved_special_token_0|>
def __init__(self, n, text):
self.n = n
self.load_text(text)
self.load_ngram()
@abc.abstractmethod
def load_text(self, text):
pass
def load_ngram(self):
counts = self.empty_count()
c = self.n
while c < len(self.text):
l = self.text[c]
p = '^'.join(self.prev_n(c))
if l:
if p not in counts[l]:
counts[l][p] = 1
else:
counts[l][p] += 1
c += 1
self.counts = counts
<|reserved_special_token_0|>
def prev_n(self, i):
return self.text[i - self.n:i]
def empty_count(self):
s = {}
return {c: dict() for c in self.cols()}
def generate_sentence(self, length):
c = length
s = []
while c > 0:
if len(s) < self.n:
sampling = self.sample(s)
else:
sampling = self.sample(s[len(s) - self.n:])
s.append(sampling)
c -= 1
return self.SEP.join(s)
def sample(self, previous):
assert len(previous) <= self.n
tokens, distribution = self.distribution('^'.join(previous))
i = np.nonzero(np.random.multinomial(1, distribution))[0][0]
return tokens[i]
def distribution(self, previous):
tokens = []
counts = []
for token in self.counts.keys():
count = self.get_count(token, previous)
tokens.append(token)
counts.append(count)
s = sum(counts)
probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))
return tokens, map(probability, counts)
@abc.abstractmethod
def cols(self):
pass
@staticmethod
def clean(text):
s = text.lower()
s = re.sub('\\n', ' ', s)
s = re.sub('[^a-z ]+', ' ', s)
return s
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NGram(object):
SEP = ''
def __init__(self, n, text):
self.n = n
self.load_text(text)
self.load_ngram()
@abc.abstractmethod
def load_text(self, text):
pass
def load_ngram(self):
counts = self.empty_count()
c = self.n
while c < len(self.text):
l = self.text[c]
p = '^'.join(self.prev_n(c))
if l:
if p not in counts[l]:
counts[l][p] = 1
else:
counts[l][p] += 1
c += 1
self.counts = counts
def get_count(self, x, y=''):
if len(y) > self.n:
return 0
elif len(y) == self.n:
p = '^'.join(y)
if x in self.counts and p in self.counts[x]:
return self.counts[x][p]
else:
return 0
else:
p = '^'.join(y)
count = 0
if x in self.counts:
for x_prev in self.counts[x].keys():
if x_prev[-len(p):] == p:
count += self.counts[x][x_prev]
return count
def prev_n(self, i):
return self.text[i - self.n:i]
def empty_count(self):
s = {}
return {c: dict() for c in self.cols()}
def generate_sentence(self, length):
c = length
s = []
while c > 0:
if len(s) < self.n:
sampling = self.sample(s)
else:
sampling = self.sample(s[len(s) - self.n:])
s.append(sampling)
c -= 1
return self.SEP.join(s)
def sample(self, previous):
assert len(previous) <= self.n
tokens, distribution = self.distribution('^'.join(previous))
i = np.nonzero(np.random.multinomial(1, distribution))[0][0]
return tokens[i]
def distribution(self, previous):
tokens = []
counts = []
for token in self.counts.keys():
count = self.get_count(token, previous)
tokens.append(token)
counts.append(count)
s = sum(counts)
probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))
return tokens, map(probability, counts)
@abc.abstractmethod
def cols(self):
pass
@staticmethod
def clean(text):
s = text.lower()
s = re.sub('\\n', ' ', s)
s = re.sub('[^a-z ]+', ' ', s)
return s
<|reserved_special_token_1|>
from __future__ import division
import abc
import re
import numpy as np
class NGram(object):
    """Abstract n-gram frequency model over a token sequence.

    Subclasses provide:
      * ``load_text(text)`` -- populate ``self.text`` with the token sequence.
      * ``cols()``          -- return the iterable of all possible tokens.

    Counts are kept as ``self.counts[token][history]`` where ``history`` is
    the '^'-joined run of ``n`` tokens immediately preceding ``token``.
    """

    # Separator used when joining sampled tokens into a sentence.
    SEP = ''

    def __init__(self, n, text):
        self.n = n  # model order: length of the history window
        self.load_text(text)
        self.load_ngram()

    @abc.abstractmethod
    def load_text(self, text):
        """Subclass hook: normalize *text* and store it as ``self.text``."""
        pass

    def load_ngram(self):
        """Scan ``self.text`` once and build the history->count table."""
        counts = self.empty_count()

        c = self.n
        while c < len(self.text):
            l = self.text[c]
            p = '^'.join(self.prev_n(c))

            if l:  # ignore falsy tokens (e.g. empty strings)
                if p not in counts[l]:
                    counts[l][p] = 1
                else:
                    counts[l][p] += 1
            c += 1

        self.counts = counts

    def get_count(self, x, y=''):
        """Return how often token ``x`` was seen after history ``y``.

        ``y`` may be shorter than the model order; in that case the counts
        of all recorded histories ending in ``y`` are summed.
        """
        if len(y) > self.n:
            # Histories longer than the model order are never recorded.
            return 0
        elif len(y) == self.n:
            p = '^'.join(y)
            if x in self.counts and p in self.counts[x]:
                return self.counts[x][p]
            else:
                return 0
        else:
            # Partial history: aggregate every stored history whose suffix
            # matches y.
            p = '^'.join(y)
            count = 0
            if x in self.counts:
                for x_prev in self.counts[x].keys():
                    if x_prev[-len(p):] == p:
                        count += self.counts[x][x_prev]
            return count

    def prev_n(self, i):
        """Return the ``n`` tokens immediately before position ``i``."""
        return self.text[i - self.n: i]

    def empty_count(self):
        """Return a fresh table: one empty history dict per possible token."""
        # NOTE: an unused local (`s = {}`) from the original was removed.
        return {c: dict() for c in self.cols()}

    def generate_sentence(self, length):
        """Sample ``length`` tokens sequentially and join them with ``SEP``."""
        c = length
        s = []
        while c > 0:
            if len(s) < self.n:
                # Not enough context yet: condition on everything so far.
                sampling = self.sample(s)
            else:
                sampling = self.sample(s[(len(s) - self.n):])
            s.append(sampling)
            c -= 1

        return self.SEP.join(s)

    def sample(self, previous):
        """Draw one token from the distribution conditioned on *previous*."""
        assert len(previous) <= self.n
        tokens, distribution = self.distribution('^'.join(previous))
        i = np.nonzero(np.random.multinomial(1, distribution))[0][0]
        return tokens[i]

    def distribution(self, previous):
        """Return (tokens, probabilities) conditioned on history *previous*.

        Probabilities are returned lazily as a map object. When no token has
        ever followed *previous*, falls back to a uniform distribution.
        """
        tokens = []
        counts = []
        for token in self.counts.keys():
            count = self.get_count(token, previous)
            tokens.append(token)
            counts.append(count)

        s = sum(counts)
        # s == 0 -> uniform fallback; otherwise normalize by the total.
        probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))
        return (tokens, map(probability, counts))

    @abc.abstractmethod
    def cols(self):
        """Subclass hook: iterable of every possible token."""
        pass

    @staticmethod
    def clean(text):
        """Lowercase *text* and collapse anything outside [a-z ] to spaces."""
        s = text.lower()
        s = re.sub(r'\n', ' ', s)
        s = re.sub(r'[^a-z ]+', ' ', s)
        return s
|
flexible
|
{
"blob_id": "41e3c18b02f9d80f987d09227da1fbc6bde0ed1d",
"index": 4812,
"step-1": "<mask token>\n\n\nclass NGram(object):\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n <mask token>\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n <mask token>\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n <mask token>\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NGram(object):\n <mask token>\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n <mask token>\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n\n def empty_count(self):\n s = {}\n return {c: dict() for c in self.cols()}\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n <mask token>\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NGram(object):\n <mask token>\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n <mask token>\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n\n def empty_count(self):\n s = {}\n return {c: dict() for c in self.cols()}\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n\n def sample(self, previous):\n assert len(previous) <= self.n\n tokens, distribution = self.distribution('^'.join(previous))\n i = np.nonzero(np.random.multinomial(1, distribution))[0][0]\n return tokens[i]\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n\n @staticmethod\n def clean(text):\n s = text.lower()\n s = re.sub('\\\\n', ' ', s)\n s = re.sub('[^a-z ]+', ' ', s)\n return s\n",
"step-4": "<mask token>\n\n\nclass NGram(object):\n SEP = ''\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n self.counts = counts\n\n def get_count(self, x, y=''):\n if len(y) > self.n:\n return 0\n elif len(y) == self.n:\n p = '^'.join(y)\n if x in self.counts and p in self.counts[x]:\n return self.counts[x][p]\n else:\n return 0\n else:\n p = '^'.join(y)\n count = 0\n if x in self.counts:\n for x_prev in self.counts[x].keys():\n if x_prev[-len(p):] == p:\n count += self.counts[x][x_prev]\n return count\n\n def prev_n(self, i):\n return self.text[i - self.n:i]\n\n def empty_count(self):\n s = {}\n return {c: dict() for c in self.cols()}\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[len(s) - self.n:])\n s.append(sampling)\n c -= 1\n return self.SEP.join(s)\n\n def sample(self, previous):\n assert len(previous) <= self.n\n tokens, distribution = self.distribution('^'.join(previous))\n i = np.nonzero(np.random.multinomial(1, distribution))[0][0]\n return tokens[i]\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1 / len(counts))\n return tokens, map(probability, counts)\n\n @abc.abstractmethod\n def cols(self):\n pass\n\n @staticmethod\n def clean(text):\n s = text.lower()\n s = re.sub('\\\\n', ' ', s)\n s = re.sub('[^a-z ]+', ' ', s)\n return s\n",
"step-5": "from __future__ import division\nimport abc\nimport re\nimport numpy as np\n\nclass NGram(object):\n SEP = ''\n\n def __init__(self, n, text):\n self.n = n\n self.load_text(text)\n self.load_ngram()\n\n @abc.abstractmethod\n def load_text(self, text):\n pass\n\n def load_ngram(self):\n counts = self.empty_count()\n\n c = self.n\n while c < len(self.text):\n l = self.text[c]\n p = '^'.join(self.prev_n(c))\n\n if l:\n if p not in counts[l]:\n counts[l][p] = 1\n else:\n counts[l][p] += 1\n c += 1\n\n self.counts = counts\n\n def get_count(self, x, y=''):\n if len(y) > self.n:\n # raise RuntimeError('Invalid n-gram')\n return 0\n elif len(y) == self.n:\n p = '^'.join(y)\n if x in self.counts and p in self.counts[x]:\n return self.counts[x][p]\n else:\n return 0\n else:\n p = '^'.join(y)\n count = 0\n if x in self.counts:\n for x_prev in self.counts[x].keys():\n if x_prev[-len(p):] == p:\n count += self.counts[x][x_prev]\n return count\n\n def prev_n(self, i):\n return self.text[i - self.n: i]\n\n def empty_count(self):\n s = {}\n return { c: dict() for c in self.cols() }\n\n def generate_sentence(self, length):\n c = length\n s = []\n while c > 0:\n if len(s) < self.n:\n sampling = self.sample(s)\n else:\n sampling = self.sample(s[(len(s) - self.n):])\n s.append(sampling)\n c -= 1\n\n return self.SEP.join(s)\n\n def sample(self, previous):\n assert len(previous) <= self.n\n tokens, distribution = self.distribution('^'.join(previous))\n i = np.nonzero(np.random.multinomial(1, distribution))[0][0]\n return tokens[i]\n\n def distribution(self, previous):\n tokens = []\n counts = []\n for token in self.counts.keys():\n count = self.get_count(token, previous)\n tokens.append(token)\n counts.append(count)\n\n s = sum(counts)\n probability = s and (lambda c: c / s) or (lambda c: 1/len(counts))\n return (tokens, map(probability, counts))\n\n @abc.abstractmethod\n def cols(self):\n pass\n\n @staticmethod\n def clean(text):\n s = text.lower()\n s = re.sub(r'\\n', ' ', 
s)\n s = re.sub(r'[^a-z ]+', ' ', s)\n return s\n",
"step-ids": [
7,
9,
11,
13,
15
]
}
|
[
7,
9,
11,
13,
15
] |
# Production settings overlay: start from the shared base configuration.
from .base import *
import os
# Fail fast (KeyError) at import time if the secret key is not provided
# via the environment -- the app must not start without it.
SECRET_KEY = os.environ['SECRET_KEY']
# Only serve requests addressed to the production hostname.
ALLOWED_HOSTS = ['demo.pythonic.nl']
# Debug pages must stay off in production.
DEBUG = False
|
normal
|
{
"blob_id": "e5607d9893b775b216d1790897124a673b190c26",
"index": 2085,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSECRET_KEY = os.environ['SECRET_KEY']\nALLOWED_HOSTS = ['demo.pythonic.nl']\nDEBUG = False\n",
"step-3": "from .base import *\nimport os\nSECRET_KEY = os.environ['SECRET_KEY']\nALLOWED_HOSTS = ['demo.pythonic.nl']\nDEBUG = False\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import fs
import re
import base64

# NOTE(review): `g2` and `capUrl` are not defined in this module -- they are
# presumably a page-fetching session object and the captcha page URL created
# elsewhere; confirm both are in scope before this script runs.
gInfo = {
    'obj': g2.go(capUrl),
    'Headers-C-T': g2.response.headers['Content-Type'],
    'url': g2.response.url,
    'urlDetails': g2.response.url_details()
}

# Extract the inline base64-encoded captcha image from the page HTML.
# BUG FIX: the original had a duplicated assignment (`capHtml = capHtml = ...`).
capHtml = gInfo['obj'].unicode_body(ignore_errors=True, fix_special_entities=True)
b64cap = re.findall(r'base64,(.*?)\\" id=', capHtml, re.DOTALL)

# Persist the raw base64 payload, then decode it into the final image file.
# Context managers guarantee the handles are closed even if a step fails.
with open(file="/home/ubuntu/captcha.png", mode="w") as savecaptcha:
    savecaptcha.write(b64cap[0])

with open(file="/home/ubuntu/captcha.png", mode="rb") as f:
    r = f.read()
i = base64.b64decode(r)

with open(file="/home/ubuntu/workspace/ffcap.jpeg", mode="wb") as fincapfile:
    capsave = fincapfile.write(i)
|
normal
|
{
"blob_id": "2a5f69fbb26bd1f94c10ff0da687391bf5bd3c23",
"index": 6054,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsavecaptcha.write(b64cap[0])\nsavecaptcha.close()\n<mask token>\nf.close()\n<mask token>\nfincapfile.close()\n",
"step-3": "<mask token>\ngInfo = {'obj': g2.go(capUrl), 'Headers-C-T': g2.response.headers[\n 'Content-Type'], 'url': g2.response.url, 'urlDetails': g2.response.\n url_details()}\ncapHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True,\n fix_special_entities=True)\nb64cap = re.findall('base64,(.*?)\\\\\\\\\" id=', capHtml, re.DOTALL)\nsavecaptcha = open(file='/home/ubuntu/captcha.png', mode='w')\nsavecaptcha.write(b64cap[0])\nsavecaptcha.close()\nf = open(file='/home/ubuntu/captcha.png', mode='rb')\nr = f.read()\ni = base64.b64decode(r)\nf.close()\nfincapfile = open(file='/home/ubuntu/workspace/ffcap.jpeg', mode='wb')\ncapsave = fincapfile.write(i)\nfincapfile.close()\n",
"step-4": "import fs\ngInfo = {'obj': g2.go(capUrl), 'Headers-C-T': g2.response.headers[\n 'Content-Type'], 'url': g2.response.url, 'urlDetails': g2.response.\n url_details()}\ncapHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True,\n fix_special_entities=True)\nb64cap = re.findall('base64,(.*?)\\\\\\\\\" id=', capHtml, re.DOTALL)\nsavecaptcha = open(file='/home/ubuntu/captcha.png', mode='w')\nsavecaptcha.write(b64cap[0])\nsavecaptcha.close()\nf = open(file='/home/ubuntu/captcha.png', mode='rb')\nr = f.read()\ni = base64.b64decode(r)\nf.close()\nfincapfile = open(file='/home/ubuntu/workspace/ffcap.jpeg', mode='wb')\ncapsave = fincapfile.write(i)\nfincapfile.close()\n",
"step-5": "import fs\n\n\ngInfo = {\n\n'obj': g2.go(capUrl),\n\n'Headers-C-T': g2.response.headers['Content-Type'],\n\n'url': g2.response.url,\n\n'urlDetails': g2.response.url_details()\n\n}\n\ncapHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True, fix_special_entities=True)\n\nb64cap = re.findall(r'base64,(.*?)\\\\\" id=', capHtml, re.DOTALL)\n\nsavecaptcha = open(file=\"/home/ubuntu/captcha.png\", mode=\"w\")\n\nsavecaptcha.write(b64cap[0])\n\nsavecaptcha.close()\n\nf = open(file=\"/home/ubuntu/captcha.png\", mode=\"rb\")\n\nr = f.read()\n\ni = base64.b64decode(r)\n\nf.close()\n\nfincapfile = open(file=\"/home/ubuntu/workspace/ffcap.jpeg\", mode=\"wb\")\n\ncapsave = fincapfile.write(i)\n\nfincapfile.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TEST_HEAD = """
>>>>>>
>>>>>> Test in progress: {0}
>>>>>>"""
TEST_TAIL = '>>>>>> Test execution done, tearDown\n\r'
<|reserved_special_token_1|>
"""
ConstantsCommands.py
"""
TEST_HEAD = "\n >>>>>> " \
"\n >>>>>> Test in progress: {0}" \
"\n >>>>>>"
TEST_TAIL = ">>>>>> Test execution done, tearDown\n\r"
|
flexible
|
{
"blob_id": "45f0a7a78184195a593061d863ff2114abe01a46",
"index": 6321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nTEST_HEAD = \"\"\"\n >>>>>> \n >>>>>> Test in progress: {0}\n >>>>>>\"\"\"\nTEST_TAIL = '>>>>>> Test execution done, tearDown\\n\\r'\n",
"step-3": "\"\"\"\nConstantsCommands.py\n\"\"\"\n\nTEST_HEAD = \"\\n >>>>>> \" \\\n \"\\n >>>>>> Test in progress: {0}\" \\\n \"\\n >>>>>>\"\n\nTEST_TAIL = \">>>>>> Test execution done, tearDown\\n\\r\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))
def pull_forecast(city, api_key):
"""
Extract the 5-day 3-hour forecast for the provided City.
"""
base_url = 'http://api.openweathermap.org/data/2.5/forecast?'
url = base_url + 'appid=' + api_key + '&q=' + city
r = requests.get(url)
r.raise_for_status()
data = r.json()
return data
@task
def is_raining_tomorrow(data):
"""
Given a list of hourly forecasts, returns a boolean specifying
whether there is rain in tomorrow's forecast.
"""
pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')
rain = [w for forecast in data['list'] for w in forecast['weather'] if
w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]
if not bool(rain):
raise SKIP('There is no rain in the forecast for tomorrow.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))
def pull_forecast(city, api_key):
"""
Extract the 5-day 3-hour forecast for the provided City.
"""
base_url = 'http://api.openweathermap.org/data/2.5/forecast?'
url = base_url + 'appid=' + api_key + '&q=' + city
r = requests.get(url)
r.raise_for_status()
data = r.json()
return data
@task
def is_raining_tomorrow(data):
"""
Given a list of hourly forecasts, returns a boolean specifying
whether there is rain in tomorrow's forecast.
"""
pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')
rain = [w for forecast in data['list'] for w in forecast['weather'] if
w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]
if not bool(rain):
raise SKIP('There is no rain in the forecast for tomorrow.')
<|reserved_special_token_0|>
with Flow('Umbrella Flow') as flow:
forecast = pull_forecast(city=city, api_key=api_key)
rain = is_raining_tomorrow(forecast)
notification.set_upstream(rain)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
city = Parameter(name='City', default='San Jose')
api_key = Secret('WEATHER_API_KEY')
@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))
def pull_forecast(city, api_key):
"""
Extract the 5-day 3-hour forecast for the provided City.
"""
base_url = 'http://api.openweathermap.org/data/2.5/forecast?'
url = base_url + 'appid=' + api_key + '&q=' + city
r = requests.get(url)
r.raise_for_status()
data = r.json()
return data
@task
def is_raining_tomorrow(data):
"""
Given a list of hourly forecasts, returns a boolean specifying
whether there is rain in tomorrow's forecast.
"""
pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')
rain = [w for forecast in data['list'] for w in forecast['weather'] if
w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]
if not bool(rain):
raise SKIP('There is no rain in the forecast for tomorrow.')
notification = SlackTask(message=
'There is rain in the forecast for tomorrow - better take your umbrella out!'
, webhook_secret='DAVID_SLACK_URL')
with Flow('Umbrella Flow') as flow:
forecast = pull_forecast(city=city, api_key=api_key)
rain = is_raining_tomorrow(forecast)
notification.set_upstream(rain)
<|reserved_special_token_1|>
import datetime
import pendulum
import requests
from prefect import task, Flow, Parameter
from prefect.engine.signals import SKIP
from prefect.tasks.notifications.slack_task import SlackTask
from prefect.tasks.secrets import Secret
# Runtime flow inputs: the city to check, and the API key held as a secret.
city = Parameter(name='City', default='San Jose')
api_key = Secret('WEATHER_API_KEY')
@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))
def pull_forecast(city, api_key):
    """Fetch the 5-day / 3-hour OpenWeatherMap forecast for *city*.

    Returns the decoded JSON payload; HTTP errors propagate via
    raise_for_status (the task retries twice, 5 seconds apart).
    """
    endpoint = 'http://api.openweathermap.org/data/2.5/forecast?'
    request_url = ''.join([endpoint, 'appid=', api_key, '&q=', city])
    response = requests.get(request_url)
    response.raise_for_status()
    return response.json()
@task
def is_raining_tomorrow(data):
    """
    Skip downstream tasks unless tomorrow's forecast contains rain.

    Args:
        data: OpenWeatherMap 5-day/3-hour forecast payload (the dict
            returned by ``pull_forecast``), with hourly entries under
            ``data['list']``.

    Raises:
        SKIP: when no forecast entry dated tomorrow reports 'Rain', so the
            dependent notification task is skipped.
    """
    # BUG FIX: the formatted date was previously discarded, leaving
    # `tomorrow` undefined (NameError) in the comprehension below.
    tomorrow = pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')
    rain = [w for forecast in data['list'] for w in forecast['weather'] if
        w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]
    if not rain:
        raise SKIP('There is no rain in the forecast for tomorrow.')
# Slack alert; delivered only when the rain check did not raise SKIP.
notification = SlackTask(message=
    'There is rain in the forecast for tomorrow - better take your umbrella out!'
    , webhook_secret='DAVID_SLACK_URL')
with Flow('Umbrella Flow') as flow:
    forecast = pull_forecast(city=city, api_key=api_key)
    rain = is_raining_tomorrow(forecast)
    # Run the notification strictly after (and only if) the rain check passes.
    notification.set_upstream(rain)
<|reserved_special_token_1|>
import datetime
import pendulum
import requests
from prefect import task, Flow, Parameter
from prefect.engine.signals import SKIP
from prefect.tasks.notifications.slack_task import SlackTask
from prefect.tasks.secrets import Secret
city = Parameter(name="City", default="San Jose")
api_key = Secret("WEATHER_API_KEY")
@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))
def pull_forecast(city, api_key):
    """
    Extract the 5-day 3-hour forecast for the provided City.

    Args:
        city: City name appended to the OpenWeatherMap query string.
        api_key: OpenWeatherMap API key.

    Returns:
        The decoded JSON forecast payload.

    Raises:
        requests.HTTPError: for non-2xx responses (via raise_for_status;
            the task retries up to 2 times, 5 seconds apart).
    """
    base_url = "http://api.openweathermap.org/data/2.5/forecast?"
    url = base_url + "appid=" + api_key + "&q=" + city
    r = requests.get(url)
    # Fail fast on HTTP errors so the @task retry policy can kick in.
    r.raise_for_status()
    data = r.json()
    return data
@task
def is_raining_tomorrow(data):
    """
    Skip downstream tasks unless tomorrow's forecast contains rain.

    Args:
        data: OpenWeatherMap 5-day/3-hour forecast payload with hourly
            entries under ``data["list"]``.

    Raises:
        SKIP: when no forecast entry dated tomorrow reports "Rain", so the
            dependent notification task is skipped.
    """
    # BUG FIX: the formatted date was previously discarded, leaving
    # `tomorrow` undefined (NameError) in the comprehension below.
    tomorrow = pendulum.now("utc").add(days=1).strftime("%Y-%m-%d")
    rain = [
        w
        for forecast in data["list"]
        for w in forecast["weather"]
        if w["main"] == "Rain" and forecast["dt_txt"].startswith(tomorrow)
    ]
    if not rain:
        raise SKIP("There is no rain in the forecast for tomorrow.")
# Slack alert; delivered only when the rain check did not raise SKIP.
notification = SlackTask(
    message="There is rain in the forecast for tomorrow - better take your umbrella out!",
    webhook_secret="DAVID_SLACK_URL",
)


with Flow("Umbrella Flow") as flow:
    forecast = pull_forecast(city=city, api_key=api_key)
    rain = is_raining_tomorrow(forecast)
    # Run the notification strictly after (and only if) the rain check passes.
    notification.set_upstream(rain)
|
flexible
|
{
"blob_id": "7f52354487f85a0bf1783c8aa76f228ef17e6d6b",
"index": 5119,
"step-1": "<mask token>\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\n<mask token>\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-3": "<mask token>\ncity = Parameter(name='City', default='San Jose')\napi_key = Secret('WEATHER_API_KEY')\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\nnotification = SlackTask(message=\n 'There is rain in the forecast for tomorrow - better take your umbrella out!'\n , webhook_secret='DAVID_SLACK_URL')\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-4": "import datetime\nimport pendulum\nimport requests\nfrom prefect import task, Flow, Parameter\nfrom prefect.engine.signals import SKIP\nfrom prefect.tasks.notifications.slack_task import SlackTask\nfrom prefect.tasks.secrets import Secret\ncity = Parameter(name='City', default='San Jose')\napi_key = Secret('WEATHER_API_KEY')\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\nnotification = SlackTask(message=\n 'There is rain in the forecast for tomorrow - better take your umbrella out!'\n , webhook_secret='DAVID_SLACK_URL')\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-5": "import datetime\nimport pendulum\nimport requests\nfrom prefect import task, Flow, Parameter\nfrom prefect.engine.signals import SKIP\nfrom prefect.tasks.notifications.slack_task import SlackTask\nfrom prefect.tasks.secrets import Secret\n\n\ncity = Parameter(name=\"City\", default=\"San Jose\")\napi_key = Secret(\"WEATHER_API_KEY\")\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = \"http://api.openweathermap.org/data/2.5/forecast?\"\n url = base_url + \"appid=\" + api_key + \"&q=\" + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now(\"utc\").add(days=1).strftime(\"%Y-%m-%d\")\n rain = [\n w\n for forecast in data[\"list\"]\n for w in forecast[\"weather\"]\n if w[\"main\"] == \"Rain\" and forecast[\"dt_txt\"].startswith(tomorrow)\n ]\n if not bool(rain):\n raise SKIP(\"There is no rain in the forecast for tomorrow.\")\n\n\nnotification = SlackTask(\n message=\"There is rain in the forecast for tomorrow - better take your umbrella out!\",\n webhook_secret=\"DAVID_SLACK_URL\",\n)\n\n\nwith Flow(\"Umbrella Flow\") as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# import gmplot package
import gmplot
import numpy as np

# generate 700 random lats and lons, uniform over the valid coordinate ranges
latitude = (np.random.random_sample(size=700) - 0.5) * 180
longitude = (np.random.random_sample(size=700) - 0.5) * 360

# declare the center of the map, and how much we want the map zoomed in
gmap = gmplot.GoogleMapPlotter(0, 0, 2)
# plot heatmap plus red markers at the same points
gmap.heatmap(latitude, longitude)
gmap.scatter(latitude, longitude, c='r', marker=True)

# SECURITY NOTE(review): a Google API key is hard-coded in source control --
# rotate this key and load it from an environment variable instead.
gmap.apikey = "AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00"

# save it to html
# FIX: the original path mixed escaped ("\\u") and literal ("\d") backslashes;
# a raw string keeps every Windows separator literal (same resulting path).
gmap.draw(r"c:\users\jackc\desktop\country_heatmap.html")
'''
import csv
import pandas as pd
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import mplcursors
import gmplot
def outputScatter():
data = pd.read_csv('C:\\Users\\jackc\\Desktop\\ctran\dataMerge.csv')
df = data.groupby('location_id')
gmap = gmplot.GoogleMapPlotter(0,0,2)
counter = 0
result = []
result_lon = []
result_lat = []
result_calculation = []
result_lon_static = []
result_lat_static = []
result_toSCV = []
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
index = 0
colors = ['r','y','g','b']
for x,y in df:
for z in range(y.location_distance.values.size):
result_lon_static.append(y.y_coordinate.values[z])
result_lat_static.append(y.x_coordinate.values[z])
if(y.location_distance.values[z] > 30):
counter = counter + 1
if(y.location_distance.values[z] > 50):
above50ft = above50ft + 1
if(y.location_distance.values[z] > 70):
above70ft = above70ft + 1
if(y.location_distance.values[z] > 90):
above90ft = above90ft + 1
if(y.location_distance.values[z] > 150):
above150ft = above150ft + 1
cal=counter/(y.location_distance.values.size)
result.append([y.stop_code.values[0], cal, y.stop_lat.values[0], y.stop_lon.values[0]])
result_lat.append(y.stop_lat.values[0])
result_lon.append(y.stop_lon.values[0])
result_calculation.append(cal)
result_toSCV.append([y.stop_code.values[0], cal, y.location_distance.values.size, counter, above50ft, above70ft, above90ft, above150ft])
index = index+1
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
counter = 0
result = sorted(result,key=itemgetter(1), reverse=True)
result_toSCV = sorted(result_toSCV, key=itemgetter(1), reverse=True)
plt.scatter(result_lat_static,result_lon_static, c='black')
code_id = []
for x in result:
#code_id.append(x[0])
#result_calculation.append(x[1])
#result_lat.append(x[2])
#result_lon.append(x[3])
if x[1] > 0.9:
red = plt.scatter(x[3],x[2], c=colors[0], label='>90%')
#red = plt.scatter(x[3],x[2], c=colors[0], label=x[0])
elif x[1] > 0.8:
yellow = plt.scatter(x[3],x[2], c=colors[1], label='>80%')
#yellow = plt.scatter(x[3],x[2], c=colors[1], label=x[0])
elif x[1] > 0.7:
green = plt.scatter(x[3],x[2], c=colors[2], label='>70%')
#green = plt.scatter(x[3],x[2], c=colors[2], label=x[0])
else:
blue = plt.scatter(x[3],x[2], c=colors[3], label='>60%')
#blue = plt.scatter(x[3],x[2], c=colors[3], label=x[0])
with open('C:\\Users\\Jackc\\Desktop\\Ctran\\outputPercentError.csv', mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['location_id', 'percent_Error', 'total_count', 'above30ft', 'above50ft', 'above70ft', 'above90ft', 'above150ft'])
for x in result_toSCV:
writer.writerow(x)
'''
|
normal
|
{
"blob_id": "1cc77ed1c5da025d1b539df202bbd3310a174eac",
"index": 3902,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\n<mask token>\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-3": "<mask token>\nlatitude = (np.random.random_sample(size=700) - 0.5) * 180\nlongitude = (np.random.random_sample(size=700) - 0.5) * 360\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\ngmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-4": "import gmplot\nimport numpy as np\nlatitude = (np.random.random_sample(size=700) - 0.5) * 180\nlongitude = (np.random.random_sample(size=700) - 0.5) * 360\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\ngmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-5": "# import gmplot package\nimport gmplot\nimport numpy as np\n# generate 700 random lats and lons\nlatitude = (np.random.random_sample(size = 700) - 0.5) * 180\nlongitude = (np.random.random_sample(size = 700) - 0.5) * 360\n# declare the center of the map, and how much we want the map zoomed in\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\n# plot heatmap\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\n#Your Google_API_Key\ngmap.apikey = \"AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00\"\n# save it to html\ngmap.draw(\"c:\\\\users\\\\jackc\\desktop\\\\country_heatmap.html\")\n\n'''\nimport csv\nimport pandas as pd\nfrom operator import itemgetter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport mplcursors\nimport gmplot\n\ndef outputScatter():\n data = pd.read_csv('C:\\\\Users\\\\jackc\\\\Desktop\\\\ctran\\dataMerge.csv')\n df = data.groupby('location_id')\n\tgmap = gmplot.GoogleMapPlotter(0,0,2)\n counter = 0\n result = []\n result_lon = []\n result_lat = []\n result_calculation = []\n result_lon_static = []\n result_lat_static = []\n result_toSCV = []\n above50ft = 0\n above70ft = 0\n above90ft = 0\n above150ft = 0\n index = 0\n colors = ['r','y','g','b']\n\n for x,y in df:\n for z in range(y.location_distance.values.size):\n result_lon_static.append(y.y_coordinate.values[z])\n result_lat_static.append(y.x_coordinate.values[z])\n if(y.location_distance.values[z] > 30):\n counter = counter + 1\n if(y.location_distance.values[z] > 50):\n above50ft = above50ft + 1\n if(y.location_distance.values[z] > 70):\n above70ft = above70ft + 1\n if(y.location_distance.values[z] > 90):\n above90ft = above90ft + 1\n if(y.location_distance.values[z] > 150):\n above150ft = above150ft + 1\n\n cal=counter/(y.location_distance.values.size)\n result.append([y.stop_code.values[0], cal, y.stop_lat.values[0], y.stop_lon.values[0]])\n result_lat.append(y.stop_lat.values[0])\n result_lon.append(y.stop_lon.values[0])\n 
result_calculation.append(cal)\n result_toSCV.append([y.stop_code.values[0], cal, y.location_distance.values.size, counter, above50ft, above70ft, above90ft, above150ft])\n index = index+1\n above50ft = 0\n above70ft = 0\n above90ft = 0\n above150ft = 0\n counter = 0\n result = sorted(result,key=itemgetter(1), reverse=True)\n result_toSCV = sorted(result_toSCV, key=itemgetter(1), reverse=True)\n plt.scatter(result_lat_static,result_lon_static, c='black')\n\n code_id = []\n for x in result:\n #code_id.append(x[0])\n #result_calculation.append(x[1])\n #result_lat.append(x[2])\n #result_lon.append(x[3])\n if x[1] > 0.9:\n red = plt.scatter(x[3],x[2], c=colors[0], label='>90%')\n #red = plt.scatter(x[3],x[2], c=colors[0], label=x[0])\n\n elif x[1] > 0.8:\n yellow = plt.scatter(x[3],x[2], c=colors[1], label='>80%')\n #yellow = plt.scatter(x[3],x[2], c=colors[1], label=x[0])\n elif x[1] > 0.7:\n green = plt.scatter(x[3],x[2], c=colors[2], label='>70%')\n #green = plt.scatter(x[3],x[2], c=colors[2], label=x[0])\n else:\n blue = plt.scatter(x[3],x[2], c=colors[3], label='>60%')\n #blue = plt.scatter(x[3],x[2], c=colors[3], label=x[0])\n\n\n with open('C:\\\\Users\\\\Jackc\\\\Desktop\\\\Ctran\\\\outputPercentError.csv', mode='w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['location_id', 'percent_Error', 'total_count', 'above30ft', 'above50ft', 'above70ft', 'above90ft', 'above150ft'])\n for x in result_toSCV:\n writer.writerow(x)\n\n'''\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 19:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Assemblage', '0002_auto_20161014_1710'),
]
operations = [
migrations.RemoveField(
model_name='hotelingroup',
name='negative_votes',
),
migrations.RemoveField(
model_name='hotelingroup',
name='positive_votes',
),
migrations.RemoveField(
model_name='hotelingroup',
name='voters',
),
migrations.AddField(
model_name='hotelingroup',
name='negative_voters',
field=models.ManyToManyField(related_name='hotelingroup_negative_voters', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='hotelingroup',
name='positive_voters',
field=models.ManyToManyField(related_name='hotelingroup_positive_voters', to=settings.AUTH_USER_MODEL),
),
]
|
normal
|
{
"blob_id": "8c05259ce577e6b6a6efdf778832e9bb817e47fd",
"index": 1414,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]\n operations = [migrations.RemoveField(model_name='hotelingroup', name=\n 'negative_votes'), migrations.RemoveField(model_name='hotelingroup',\n name='positive_votes'), migrations.RemoveField(model_name=\n 'hotelingroup', name='voters'), migrations.AddField(model_name=\n 'hotelingroup', name='negative_voters', field=models.\n ManyToManyField(related_name='hotelingroup_negative_voters', to=\n settings.AUTH_USER_MODEL)), migrations.AddField(model_name=\n 'hotelingroup', name='positive_voters', field=models.\n ManyToManyField(related_name='hotelingroup_positive_voters', to=\n settings.AUTH_USER_MODEL))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]\n operations = [migrations.RemoveField(model_name='hotelingroup', name=\n 'negative_votes'), migrations.RemoveField(model_name='hotelingroup',\n name='positive_votes'), migrations.RemoveField(model_name=\n 'hotelingroup', name='voters'), migrations.AddField(model_name=\n 'hotelingroup', name='negative_voters', field=models.\n ManyToManyField(related_name='hotelingroup_negative_voters', to=\n settings.AUTH_USER_MODEL)), migrations.AddField(model_name=\n 'hotelingroup', name='positive_voters', field=models.\n ManyToManyField(related_name='hotelingroup_positive_voters', to=\n settings.AUTH_USER_MODEL))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2016-10-14 19:37\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('Assemblage', '0002_auto_20161014_1710'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='hotelingroup',\n name='negative_votes',\n ),\n migrations.RemoveField(\n model_name='hotelingroup',\n name='positive_votes',\n ),\n migrations.RemoveField(\n model_name='hotelingroup',\n name='voters',\n ),\n migrations.AddField(\n model_name='hotelingroup',\n name='negative_voters',\n field=models.ManyToManyField(related_name='hotelingroup_negative_voters', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='hotelingroup',\n name='positive_voters',\n field=models.ManyToManyField(related_name='hotelingroup_positive_voters', to=settings.AUTH_USER_MODEL),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
SUMMARY
Auxiliary functions, provided here to avoid clutter
"""
"""
Transforms a point (P = [x, y]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding discrete point (D = [xd, yd])
loc_min = [x_min, y_min]
"""
def discretize_location(P, loc_min, Δxy):
x_from_start = P[0] - loc_min[0]
y_from_start = P[1] - loc_min[1]
xd = int(x_from_start//Δxy[0])
yd = int(y_from_start//Δxy[1])
return [xd, yd]
"""
Transforms a discretized point (PD = [xd, yd]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding point (P = [x, d])
loc_min = [x_min, y_min]
"""
def continuous_location(PD, loc_min, Δxy):
x = PD[0]*Δxy[0] + loc_min[0]
y = PD[1]*Δxy[1] + loc_min[1]
return [x, y]
"""
Obtains the points in the border of a cell (starting at bottom left (BL = [x_bl, y_bl])), starting point not repeated
"""
def cell_borders(BL, Δxy):
[x_bl, y_bl] = BL
Δx = Δxy[0]
Δy = Δxy[1]
x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]
y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]
return [x_border, y_border]
"""
Appends the first element of the array to the end, useful when plotting
"""
def first_append_to_last(arr):
return arr + [arr[0]]
"""
Calculates the RMS (root mean square) value of an array
"""
def RMS(arr):
n = len(arr)
sq_sum = sum(a**2 for a in arr)
return (sq_sum/n)**0.5
"""
Calculates the L1 norm (Manhattan distance) between P1 = [x1, y1] and P2 = [x2, y2]
"""
def L1(P1, P2):
x1, y1 = P1
x2, y2 = P2
return abs(x2 - x1) + abs(y2 - y1)
"""
Turns x, y, o, v into a string of the form "x, y, v, o"
"""
def state_to_str(x, y, v, o):
return "%d, %d, %d, %d" % (x, y, v, o)
|
normal
|
{
"blob_id": "8bbc929e2ff2321b97195031fa675fbdab269fcb",
"index": 3288,
"step-1": "<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-2": "<mask token>\n\n\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n return [x_border, y_border]\n\n\n<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-3": "<mask token>\n\n\ndef continuous_location(PD, loc_min, Δxy):\n x = PD[0] * Δxy[0] + loc_min[0]\n y = PD[1] * Δxy[1] + loc_min[1]\n return [x, y]\n\n\n<mask token>\n\n\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n return [x_border, y_border]\n\n\n<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-4": "<mask token>\n\n\ndef discretize_location(P, loc_min, Δxy):\n x_from_start = P[0] - loc_min[0]\n y_from_start = P[1] - loc_min[1]\n xd = int(x_from_start // Δxy[0])\n yd = int(y_from_start // Δxy[1])\n return [xd, yd]\n\n\n<mask token>\n\n\ndef continuous_location(PD, loc_min, Δxy):\n x = PD[0] * Δxy[0] + loc_min[0]\n y = PD[1] * Δxy[1] + loc_min[1]\n return [x, y]\n\n\n<mask token>\n\n\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n return [x_border, y_border]\n\n\n<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-5": "\"\"\"\nSUMMARY\n\nAuxiliary functions, provided here to avoid clutter\n\"\"\"\n\n\n\"\"\"\nTransforms a point (P = [x, y]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding discrete point (D = [xd, yd])\nloc_min = [x_min, y_min]\n\"\"\"\ndef discretize_location(P, loc_min, Δxy):\n x_from_start = P[0] - loc_min[0]\n y_from_start = P[1] - loc_min[1]\n\n xd = int(x_from_start//Δxy[0])\n yd = int(y_from_start//Δxy[1])\n\n return [xd, yd]\n\n\n\n\"\"\"\nTransforms a discretized point (PD = [xd, yd]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding point (P = [x, d])\nloc_min = [x_min, y_min]\n\"\"\"\ndef continuous_location(PD, loc_min, Δxy):\n\n x = PD[0]*Δxy[0] + loc_min[0]\n y = PD[1]*Δxy[1] + loc_min[1]\n\n return [x, y]\n\n\n\n\"\"\"\nObtains the points in the border of a cell (starting at bottom left (BL = [x_bl, y_bl])), starting point not repeated\n\"\"\"\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n\n return [x_border, y_border]\n\n\n\"\"\"\nAppends the first element of the array to the end, useful when plotting\n\"\"\"\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n\n\"\"\"\nCalculates the RMS (root mean square) value of an array\n\"\"\"\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a**2 for a in arr)\n return (sq_sum/n)**0.5\n\n\n\n\"\"\"\nCalculates the L1 norm (Manhattan distance) between P1 = [x1, y1] and P2 = [x2, y2]\n\"\"\"\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n\n\"\"\"\nTurns x, y, o, v into a string of the form \"x, y, v, o\"\n\"\"\"\ndef state_to_str(x, y, v, o):\n return \"%d, %d, %d, %d\" % (x, y, v, o)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def lexicalOrder(self, n):
"""
:type n: int
:rtype: List[int]
"""
acc = []
self.backtrack(acc, 1, n)
return acc
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def lexicalOrder(self, n):
"""
:type n: int
:rtype: List[int]
"""
acc = []
self.backtrack(acc, 1, n)
return acc
def backtrack(self, acc, counter, n):
if counter > n:
return
elif len(acc) == n:
return
else:
acc.append(counter)
self.backtrack(acc, counter * 10, n)
if counter % 10 != 9:
self.backtrack(acc, counter + 1, n)
|
flexible
|
{
"blob_id": "79f4ede16628c6fbf37dfb4fe5afb8489c120f5a",
"index": 6597,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n <mask token>\n",
"step-3": "class Solution(object):\n\n def lexicalOrder(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n \"\"\"\n acc = []\n self.backtrack(acc, 1, n)\n return acc\n <mask token>\n",
"step-4": "class Solution(object):\n\n def lexicalOrder(self, n):\n \"\"\"\n :type n: int\n :rtype: List[int]\n \"\"\"\n acc = []\n self.backtrack(acc, 1, n)\n return acc\n\n def backtrack(self, acc, counter, n):\n if counter > n:\n return\n elif len(acc) == n:\n return\n else:\n acc.append(counter)\n self.backtrack(acc, counter * 10, n)\n if counter % 10 != 9:\n self.backtrack(acc, counter + 1, n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class B:
<|reserved_special_token_0|>
class C(B, A):
print('class C')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class A:
<|reserved_special_token_0|>
class B:
def m(self):
print('Class B')
class C(B, A):
print('class C')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class A:
def m(self):
print('Class A')
class B:
def m(self):
print('Class B')
class C(B, A):
print('class C')
<|reserved_special_token_0|>
obj1.m()
print(C.mro())
<|reserved_special_token_1|>
class A:
def m(self):
print('Class A')
class B:
def m(self):
print('Class B')
class C(B, A):
print('class C')
obj1 = C()
obj1.m()
print(C.mro())
<|reserved_special_token_1|>
class A():
def m(self):
print("Class A")
class B():
def m(self):
print("Class B")
class C(B, A):
print("class C")
obj1 = C()
obj1.m()
print(C.mro()) # Method Resolution Order based on convention of "OBJECT" super class
|
flexible
|
{
"blob_id": "3d59b8d6a34935ff332028443276f161430a981c",
"index": 9687,
"step-1": "<mask token>\n\n\nclass B:\n <mask token>\n\n\nclass C(B, A):\n print('class C')\n\n\n<mask token>\n",
"step-2": "class A:\n <mask token>\n\n\nclass B:\n\n def m(self):\n print('Class B')\n\n\nclass C(B, A):\n print('class C')\n\n\n<mask token>\n",
"step-3": "class A:\n\n def m(self):\n print('Class A')\n\n\nclass B:\n\n def m(self):\n print('Class B')\n\n\nclass C(B, A):\n print('class C')\n\n\n<mask token>\nobj1.m()\nprint(C.mro())\n",
"step-4": "class A:\n\n def m(self):\n print('Class A')\n\n\nclass B:\n\n def m(self):\n print('Class B')\n\n\nclass C(B, A):\n print('class C')\n\n\nobj1 = C()\nobj1.m()\nprint(C.mro())\n",
"step-5": "class A():\n def m(self):\n print(\"Class A\")\n\nclass B():\n def m(self):\n print(\"Class B\")\n\nclass C(B, A):\n print(\"class C\")\n\nobj1 = C()\n\nobj1.m()\n\nprint(C.mro()) # Method Resolution Order based on convention of \"OBJECT\" super class\n",
"step-ids": [
2,
4,
6,
7,
8
]
}
|
[
2,
4,
6,
7,
8
] |
from django.apps import AppConfig
class TimestechConfig(AppConfig):
name = 'TimesTech'
|
normal
|
{
"blob_id": "94f50e371ef65e86d0d2d40a3ed16946f8811be3",
"index": 2601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TimestechConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TimestechConfig(AppConfig):\n name = 'TimesTech'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass TimestechConfig(AppConfig):\n name = 'TimesTech'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import pandas as pd
import time
from sklearn.metrics import log_loss
from keras.models import Sequential, Model
from keras.layers import Dense, Input
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers.convolutional import Convolution3D
from keras.layers.convolutional import MaxPooling3D
from keras import backend as K
K.set_image_dim_ordering('th')
start_time = time.time()
#Random seed
np.random.seed(123)
#Load training data
much_data = np.load('muchdata-50-50-20.npy')
X_init = much_data[:,0]
y_init = much_data[:,1]
#Load test data
test_data = np.load('testdata-50-50-20.npy')
patient_order = np.load('testpatientorder.npy')
patient_order = list(patient_order)
IMG_PX_SIZE = 50
HM_SLICES = 20
X = np.zeros((len(X_init),HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE))
y = np.zeros((len(y_init),2))
for i in range(0,len(X_init)):
try:
X[i] = X_init[i]
y[i] = y_init[i]
except:
print("problem")
continue
print("done")
X_test = np.zeros((len(test_data),20,50,50))
y_test = np.zeros((len(test_data),1))
for i in range(0,len(test_data)):
try:
X_test[i] = test_data[i]
except:
print("problem_test")
continue
solution = pd.read_csv('stage1_solution.csv', index_col=0)
for ind, row in solution.iterrows():
n = patient_order.index(ind)
y_test[n] = row[0]
print("done")
#Reshape to [samples][channels][width][height]
X = X.reshape(X.shape[0],1,HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE).astype('float32')
X_test = X_test.reshape(X_test.shape[0],1,HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE).astype('float32')
def base_model():
input_shape=(1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE)
inputs = Input(shape=input_shape)
conv1 = Convolution3D(32, 5, 5, 5, activation='relu')(inputs)
drop1 = Dropout(0.2)(conv1)
conv2 = Convolution3D(32, 5, 5, 5, activation='relu')(drop1)
pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
## conv3 = Convolution3D(64, 5, 5, 5, activation='relu')(pool1)
## drop2 = Dropout(0.2)(conv3)
## conv4 = Convolution3D(64, 5, 5, 5, activation='relu')(drop2)
## pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv4)
flatten = Flatten()(pool1)
## dense1 = Dense(1024, activation='tanh')(flatten)
## drop3 = Dropout(0.2)(dense1)
dense2 = Dense(512, activation='tanh')(flatten)
drop4 = Dropout(0.2)(dense2)
dense3 = Dense(128, activation='tanh')(drop4)
dense4 = Dense(2, activation='sigmoid')(dense3)
model = Model(input=inputs, output=dense4)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# build the model
model = base_model()
# Fit the model
model.fit(X, y, nb_epoch=20, batch_size=30, verbose=2)
model.summary()
# Prediction: the network input is (samples, 1, slices, h, w), so predict on
# the reshaped X_test — the raw test_data array lacks the channel axis and
# would fail the input-shape check.
predictions = model.predict(X_test, verbose=1)
logloss = log_loss(y_test, predictions)
print(logloss)
print("Total time: {} seconds".format(time.time() - start_time))
|
normal
|
{
"blob_id": "e3d886dedaf5b120392d0dc81c4c71398f08f8d6",
"index": 8234,
"step-1": "<mask token>\n\n\ndef base_model():\n input_shape = 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE\n inputs = Input(shape=input_shape)\n conv1 = Convolution3D(32, 5, 5, 5, activation='relu')(inputs)\n drop1 = Dropout(0.2)(conv1)\n conv2 = Convolution3D(32, 5, 5, 5, activation='relu')(drop1)\n pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n flatten = Flatten()(pool1)\n dense2 = Dense(512, activation='tanh')(flatten)\n drop4 = Dropout(0.2)(dense2)\n dense3 = Dense(128, activation='tanh')(drop4)\n dense4 = Dense(2, activation='sigmoid')(dense3)\n model = Model(input=inputs, output=dense4)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\nK.set_image_dim_ordering('th')\n<mask token>\nnp.random.seed(123)\n<mask token>\nfor i in range(0, len(X_init)):\n try:\n X[i] = X_init[i]\n y[i] = y_init[i]\n except:\n print('problem')\n continue\nprint('done')\n<mask token>\nfor i in range(0, len(test_data)):\n try:\n X_test[i] = test_data[i]\n except:\n print('problem_test')\n continue\n<mask token>\nfor ind, row in solution.iterrows():\n n = patient_order.index(ind)\n y_test[n] = row[0]\nprint('done')\n<mask token>\n\n\ndef base_model():\n input_shape = 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE\n inputs = Input(shape=input_shape)\n conv1 = Convolution3D(32, 5, 5, 5, activation='relu')(inputs)\n drop1 = Dropout(0.2)(conv1)\n conv2 = Convolution3D(32, 5, 5, 5, activation='relu')(drop1)\n pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n flatten = Flatten()(pool1)\n dense2 = Dense(512, activation='tanh')(flatten)\n drop4 = Dropout(0.2)(dense2)\n dense3 = Dense(128, activation='tanh')(drop4)\n dense4 = Dense(2, activation='sigmoid')(dense3)\n model = Model(input=inputs, output=dense4)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\n<mask token>\nmodel.fit(X, y, nb_epoch=20, batch_size=30, verbose=2)\nmodel.summary()\n<mask token>\nprint(logloss)\nprint('Total time: {} seconds'.format(time.time() - start_time))\n",
"step-3": "<mask token>\nK.set_image_dim_ordering('th')\nstart_time = time.time()\nnp.random.seed(123)\nmuch_data = np.load('muchdata-50-50-20.npy')\nX_init = much_data[:, 0]\ny_init = much_data[:, 1]\ntest_data = np.load('testdata-50-50-20.npy')\npatient_order = np.load('testpatientorder.npy')\npatient_order = list(patient_order)\nIMG_PX_SIZE = 50\nHM_SLICES = 20\nX = np.zeros((len(X_init), HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE))\ny = np.zeros((len(y_init), 2))\nfor i in range(0, len(X_init)):\n try:\n X[i] = X_init[i]\n y[i] = y_init[i]\n except:\n print('problem')\n continue\nprint('done')\nX_test = np.zeros((len(test_data), 20, 50, 50))\ny_test = np.zeros((len(test_data), 1))\nfor i in range(0, len(test_data)):\n try:\n X_test[i] = test_data[i]\n except:\n print('problem_test')\n continue\nsolution = pd.read_csv('stage1_solution.csv', index_col=0)\nfor ind, row in solution.iterrows():\n n = patient_order.index(ind)\n y_test[n] = row[0]\nprint('done')\nX = X.reshape(X.shape[0], 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE).astype(\n 'float32')\nX_test = X_test.reshape(X_test.shape[0], 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE\n ).astype('float32')\n\n\ndef base_model():\n input_shape = 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE\n inputs = Input(shape=input_shape)\n conv1 = Convolution3D(32, 5, 5, 5, activation='relu')(inputs)\n drop1 = Dropout(0.2)(conv1)\n conv2 = Convolution3D(32, 5, 5, 5, activation='relu')(drop1)\n pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n flatten = Flatten()(pool1)\n dense2 = Dense(512, activation='tanh')(flatten)\n drop4 = Dropout(0.2)(dense2)\n dense3 = Dense(128, activation='tanh')(drop4)\n dense4 = Dense(2, activation='sigmoid')(dense3)\n model = Model(input=inputs, output=dense4)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\nmodel = base_model()\nmodel.fit(X, y, nb_epoch=20, batch_size=30, verbose=2)\nmodel.summary()\npredictions = model.predict(test_data, verbose=1)\nlogloss = 
log_loss(y_test, predictions)\nprint(logloss)\nprint('Total time: {} seconds'.format(time.time() - start_time))\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport time\nfrom sklearn.metrics import log_loss\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Input\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import LSTM\nfrom keras.layers.convolutional import Convolution3D\nfrom keras.layers.convolutional import MaxPooling3D\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\nstart_time = time.time()\nnp.random.seed(123)\nmuch_data = np.load('muchdata-50-50-20.npy')\nX_init = much_data[:, 0]\ny_init = much_data[:, 1]\ntest_data = np.load('testdata-50-50-20.npy')\npatient_order = np.load('testpatientorder.npy')\npatient_order = list(patient_order)\nIMG_PX_SIZE = 50\nHM_SLICES = 20\nX = np.zeros((len(X_init), HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE))\ny = np.zeros((len(y_init), 2))\nfor i in range(0, len(X_init)):\n try:\n X[i] = X_init[i]\n y[i] = y_init[i]\n except:\n print('problem')\n continue\nprint('done')\nX_test = np.zeros((len(test_data), 20, 50, 50))\ny_test = np.zeros((len(test_data), 1))\nfor i in range(0, len(test_data)):\n try:\n X_test[i] = test_data[i]\n except:\n print('problem_test')\n continue\nsolution = pd.read_csv('stage1_solution.csv', index_col=0)\nfor ind, row in solution.iterrows():\n n = patient_order.index(ind)\n y_test[n] = row[0]\nprint('done')\nX = X.reshape(X.shape[0], 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE).astype(\n 'float32')\nX_test = X_test.reshape(X_test.shape[0], 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE\n ).astype('float32')\n\n\ndef base_model():\n input_shape = 1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE\n inputs = Input(shape=input_shape)\n conv1 = Convolution3D(32, 5, 5, 5, activation='relu')(inputs)\n drop1 = Dropout(0.2)(conv1)\n conv2 = Convolution3D(32, 5, 5, 5, activation='relu')(drop1)\n pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n flatten = Flatten()(pool1)\n dense2 = Dense(512, activation='tanh')(flatten)\n drop4 = Dropout(0.2)(dense2)\n dense3 = 
Dense(128, activation='tanh')(drop4)\n dense4 = Dense(2, activation='sigmoid')(dense3)\n model = Model(input=inputs, output=dense4)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n return model\n\n\nmodel = base_model()\nmodel.fit(X, y, nb_epoch=20, batch_size=30, verbose=2)\nmodel.summary()\npredictions = model.predict(test_data, verbose=1)\nlogloss = log_loss(y_test, predictions)\nprint(logloss)\nprint('Total time: {} seconds'.format(time.time() - start_time))\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport time\nfrom sklearn.metrics import log_loss\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Input\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import LSTM\nfrom keras.layers.convolutional import Convolution3D\nfrom keras.layers.convolutional import MaxPooling3D\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\nstart_time = time.time()\n#Random seed\nnp.random.seed(123)\n#Load training data\nmuch_data = np.load('muchdata-50-50-20.npy')\nX_init = much_data[:,0]\ny_init = much_data[:,1]\n#Load test data\ntest_data = np.load('testdata-50-50-20.npy')\npatient_order = np.load('testpatientorder.npy')\npatient_order = list(patient_order)\n\nIMG_PX_SIZE = 50\nHM_SLICES = 20\n\nX = np.zeros((len(X_init),HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE))\ny = np.zeros((len(y_init),2))\nfor i in range(0,len(X_init)):\n try:\n X[i] = X_init[i]\n y[i] = y_init[i]\n except:\n print(\"problem\")\n continue\n\nprint(\"done\")\nX_test = np.zeros((len(test_data),20,50,50))\ny_test = np.zeros((len(test_data),1))\nfor i in range(0,len(test_data)):\n try:\n X_test[i] = test_data[i]\n except:\n print(\"problem_test\")\n continue\n\nsolution = pd.read_csv('stage1_solution.csv', index_col=0)\nfor ind, row in solution.iterrows():\n n = patient_order.index(ind)\n y_test[n] = row[0]\nprint(\"done\")\n\n#Reshape to [samples][channels][width][height]\nX = X.reshape(X.shape[0],1,HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE).astype('float32')\nX_test = X_test.reshape(X_test.shape[0],1,HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE).astype('float32')\n\ndef base_model():\n input_shape=(1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE)\n inputs = Input(shape=input_shape)\n conv1 = Convolution3D(32, 5, 5, 5, activation='relu')(inputs)\n drop1 = Dropout(0.2)(conv1)\n conv2 = Convolution3D(32, 5, 5, 5, activation='relu')(drop1)\n pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n## conv3 = 
Convolution3D(64, 5, 5, 5, activation='relu')(pool1)\n## drop2 = Dropout(0.2)(conv3)\n## conv4 = Convolution3D(64, 5, 5, 5, activation='relu')(drop2)\n## pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv4)\n flatten = Flatten()(pool1)\n## dense1 = Dense(1024, activation='tanh')(flatten)\n## drop3 = Dropout(0.2)(dense1)\n dense2 = Dense(512, activation='tanh')(flatten)\n drop4 = Dropout(0.2)(dense2)\n dense3 = Dense(128, activation='tanh')(drop4)\n dense4 = Dense(2, activation='sigmoid')(dense3)\n model = Model(input=inputs, output=dense4)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n \n# build the model\nmodel = base_model()\n# Fit the model\nmodel.fit(X, y, nb_epoch=20, batch_size=30,verbose=2)\nmodel.summary()\n#Prediction\npredictions = model.predict(test_data, verbose=1)\n\nlogloss = log_loss(y_test,predictions)\n\nprint(logloss)\n\nprint(\"Total time: {} seconds\".format(time.time() - start_time))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import ludwig.schema.decoders.base
import ludwig.schema.decoders.sequence_decoders
<|reserved_special_token_1|>
# Register all decoders
import ludwig.schema.decoders.base
import ludwig.schema.decoders.sequence_decoders # noqa
|
flexible
|
{
"blob_id": "53509d826b82211bac02ea5f545802007b06781c",
"index": 1630,
"step-1": "<mask token>\n",
"step-2": "import ludwig.schema.decoders.base\nimport ludwig.schema.decoders.sequence_decoders\n",
"step-3": "# Register all decoders\nimport ludwig.schema.decoders.base\nimport ludwig.schema.decoders.sequence_decoders # noqa\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
__author__ = 'Or'
|
normal
|
{
"blob_id": "54c1b294d826deb43978591cad590c5e969bebd7",
"index": 6655,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'Or'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def haversine(pt1, pt2):
"""
INPUT: tuples (lon1, lat1), (lon2, lat2)
OUTPUT: The great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
c = 2 * asin(sqrt(a))
r = 6371
return c * r
<|reserved_special_token_0|>
def distance_along_route(pt_1_ind, pt_2_ind, shape):
d1 = shape.loc[pt_1_ind]['shape_dist_traveled']
d2 = shape.loc[pt_2_ind]['shape_dist_traveled']
return d2 - d1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def haversine(pt1, pt2):
"""
INPUT: tuples (lon1, lat1), (lon2, lat2)
OUTPUT: The great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
c = 2 * asin(sqrt(a))
r = 6371
return c * r
def get_closest_shape_pt(lat, lon, shape):
dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[
'shape_pt_lat']), (lon, lat)), axis=1)
return dist.argmin()
def distance_along_route(pt_1_ind, pt_2_ind, shape):
d1 = shape.loc[pt_1_ind]['shape_dist_traveled']
d2 = shape.loc[pt_2_ind]['shape_dist_traveled']
return d2 - d1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def haversine(pt1, pt2):
"""
INPUT: tuples (lon1, lat1), (lon2, lat2)
OUTPUT: The great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
c = 2 * asin(sqrt(a))
r = 6371
return c * r
def get_closest_shape_pt(lat, lon, shape):
dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[
'shape_pt_lat']), (lon, lat)), axis=1)
return dist.argmin()
def distance_along_route(pt_1_ind, pt_2_ind, shape):
d1 = shape.loc[pt_1_ind]['shape_dist_traveled']
d2 = shape.loc[pt_2_ind]['shape_dist_traveled']
return d2 - d1
def distance_from_segment(pt, seg_pt_1, seg_pt_2):
c = haversine(seg_pt_1, seg_pt_2)
b = haversine(seg_pt_1, pt)
a = haversine(seg_pt_2, pt)
num1 = b ** 2 + c ** 2 - a ** 2
num2 = a ** 2 + c ** 2 - b ** 2
if num1 < 0 or num2 < 0:
return min(a, b)
theta = acos(num1 / (2.0 * b * c))
h = b * sin(theta)
return h
<|reserved_special_token_1|>
from datetime import datetime as dt
from math import radians, cos, sin, acos, asin, sqrt
import networkx as nx
def haversine(pt1, pt2):
"""
INPUT: tuples (lon1, lat1), (lon2, lat2)
OUTPUT: The great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
c = 2 * asin(sqrt(a))
r = 6371
return c * r
def get_closest_shape_pt(lat, lon, shape):
dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[
'shape_pt_lat']), (lon, lat)), axis=1)
return dist.argmin()
def distance_along_route(pt_1_ind, pt_2_ind, shape):
d1 = shape.loc[pt_1_ind]['shape_dist_traveled']
d2 = shape.loc[pt_2_ind]['shape_dist_traveled']
return d2 - d1
def distance_from_segment(pt, seg_pt_1, seg_pt_2):
c = haversine(seg_pt_1, seg_pt_2)
b = haversine(seg_pt_1, pt)
a = haversine(seg_pt_2, pt)
num1 = b ** 2 + c ** 2 - a ** 2
num2 = a ** 2 + c ** 2 - b ** 2
if num1 < 0 or num2 < 0:
return min(a, b)
theta = acos(num1 / (2.0 * b * c))
h = b * sin(theta)
return h
<|reserved_special_token_1|>
#grabbed the following from moses marsh -- https://github.com/sidetrackedmind/gimme-bus/blob/master/gimmebus/utilities.py
from datetime import datetime as dt
from math import radians, cos, sin, acos, asin, sqrt
import networkx as nx
## These functions will go in model.py for matching historical GPS
## positions to the defined route shapes
def haversine(pt1, pt2):
    """
    INPUT: tuples (lon1, lat1), (lon2, lat2)

    OUTPUT: The great circle distance between two points
    on the earth (specified in decimal degrees)
    """
    lon_a, lat_a = radians(pt1[0]), radians(pt1[1])
    lon_b, lat_b = radians(pt2[0]), radians(pt2[1])

    # haversine of the central angle between the two points
    half_dlat = (lat_b - lat_a) / 2.
    half_dlon = (lon_b - lon_a) / 2.
    chord = sin(half_dlat) ** 2 + cos(lat_a) * cos(lat_b) * sin(half_dlon) ** 2
    arc = 2 * asin(sqrt(chord))

    earth_radius_km = 6371  # use 3956 for miles
    return arc * earth_radius_km
def get_closest_shape_pt(lat, lon, shape):
    """Return the index label of the shape point nearest to (lat, lon).

    Args:
        lat, lon: query position in decimal degrees.
        shape: DataFrame with 'shape_pt_lat' / 'shape_pt_lon' columns.

    Returns:
        Index label of the row minimizing haversine distance. idxmin
        (label-based) is used instead of argmin: on modern pandas,
        Series.argmin returns a *position*, which would mis-index the
        .loc lookups in distance_along_route whenever the frame's index
        is not 0..n-1.
    """
    dist = shape.apply(lambda row: haversine((row['shape_pt_lon'],
                                              row['shape_pt_lat']),
                                             (lon, lat)), axis=1)
    return dist.idxmin()
def distance_along_route(pt_1_ind, pt_2_ind, shape):
    """Signed distance travelled along the route between two shape points.

    Looks up 'shape_dist_traveled' for each index label and returns the
    difference (second minus first); negative when pt_2 precedes pt_1.
    """
    traveled = shape['shape_dist_traveled']
    return traveled.loc[pt_2_ind] - traveled.loc[pt_1_ind]
def distance_from_segment(pt, seg_pt_1, seg_pt_2):
    """Distance (km) from pt to the segment seg_pt_1 -> seg_pt_2.

    Treats the three pairwise haversine distances as sides of a planar
    triangle: if the perpendicular foot falls outside the segment (an
    obtuse angle at either endpoint), return the distance to the nearer
    endpoint; otherwise return the triangle height dropped from pt.
    """
    base = haversine(seg_pt_1, seg_pt_2)
    to_start = haversine(seg_pt_1, pt)
    to_end = haversine(seg_pt_2, pt)

    # Law-of-cosines numerators; a negative value means an obtuse angle.
    cos_num_start = to_start ** 2 + base ** 2 - to_end ** 2
    cos_num_end = to_end ** 2 + base ** 2 - to_start ** 2

    if cos_num_start < 0 or cos_num_end < 0:
        # Foot of the perpendicular lies beyond an endpoint: clamp.
        return min(to_end, to_start)

    angle_at_start = acos(cos_num_start / (2. * to_start * base))
    return to_start * sin(angle_at_start)
|
flexible
|
{
"blob_id": "89ce3d3ec9691ab8f54cc0d9d008e06c65b5f2cc",
"index": 7847,
"step-1": "<mask token>\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\n<mask token>\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[\n 'shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[\n 'shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\ndef distance_from_segment(pt, seg_pt_1, seg_pt_2):\n c = haversine(seg_pt_1, seg_pt_2)\n b = haversine(seg_pt_1, pt)\n a = haversine(seg_pt_2, pt)\n num1 = b ** 2 + c ** 2 - a ** 2\n num2 = a ** 2 + c ** 2 - b ** 2\n if num1 < 0 or num2 < 0:\n return min(a, b)\n theta = acos(num1 / (2.0 * b * c))\n h = b * sin(theta)\n return h\n",
"step-4": "from datetime import datetime as dt\nfrom math import radians, cos, sin, acos, asin, sqrt\nimport networkx as nx\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[\n 'shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\ndef distance_from_segment(pt, seg_pt_1, seg_pt_2):\n c = haversine(seg_pt_1, seg_pt_2)\n b = haversine(seg_pt_1, pt)\n a = haversine(seg_pt_2, pt)\n num1 = b ** 2 + c ** 2 - a ** 2\n num2 = a ** 2 + c ** 2 - b ** 2\n if num1 < 0 or num2 < 0:\n return min(a, b)\n theta = acos(num1 / (2.0 * b * c))\n h = b * sin(theta)\n return h\n",
"step-5": "#grabbed the following from moses marsh -- https://github.com/sidetrackedmind/gimme-bus/blob/master/gimmebus/utilities.py\n\nfrom datetime import datetime as dt\nfrom math import radians, cos, sin, acos, asin, sqrt\nimport networkx as nx\n\n## These functions will go in model.py for matching historical GPS\n## positions to the defined route shapes\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2.)**2 + cos(lat1) * cos(lat2) * sin(dlon/2.)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], \\\n x['shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\ndef distance_from_segment(pt, seg_pt_1, seg_pt_2):\n c = haversine(seg_pt_1, seg_pt_2)\n b = haversine(seg_pt_1, pt)\n a = haversine(seg_pt_2, pt)\n\n num1 = (b**2 + c**2 - a**2)\n num2 = (a**2 + c**2 - b**2)\n\n if (num1 < 0) or (num2 < 0):\n return min(a, b)\n\n theta = acos( num1 / (2.*b*c))\n h = b * sin(theta)\n\n return h\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import nevergrad as ng
import numpy as np
import torch
from pix2latent.utils.image import binarize
class _BaseNevergradOptimizer():
    """
    Base template for NeverGrad optimization. Should be used jointly with
    BaseOptimizer.

    For full list of available optimizers
    > https://github.com/facebookresearch/nevergrad

    or ...
    > print(self.valid_methods)

    Args:
        method: nevergrad optimization method

    NOTE:
        nevergrad CMA have been observed to perform wrose than the original
        codebase. use with warning. nevergrad has a perk of being optimized
        in parallel, hence batch-size can be arbitrarily chosen.
    """

    def __init__(self, method):
        self.method = method
        self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]

        # this is not an exhaustive list
        self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
        self.is_sequential = self.method in self.sequential_methods

        if self.is_sequential:
            seq_msg = '{} is a sequential method. batch size is set to 1'
            # BUGFIX: the original called cprint(), which is never imported
            # in this module and raised NameError for sequential methods;
            # plain print keeps the warning without the crash.
            print(seq_msg.format(self.method))

        assert self.method in self.valid_methods, \
            f'unknown nevergrad method: {self.method}'

        self.ng_optimizers = {}
        self._sampled = {}
        return

    @torch.no_grad()
    def setup_ng(self, var_manager, budget):
        """
        initializes NeverGrad optimizer.

        Args
            var_manager (VariableManger): instance of the variable manager
            budget (int): number of optimization iteration.
        """

        for var_name, var_dict in var_manager.variable_info.items():
            if var_dict['grad_free'] is False:
                continue

            if isinstance(var_dict['grad_free'], tuple):
                # (mu, sigma) provided explicitly; fall back to defaults
                # for whichever half is missing.
                # BUGFIX: removed dead `cma_opt = CMA(mu, sigma=sigma)` --
                # CMA is undefined in this module (NameError on this
                # branch) and the object was never used; ng_opt below is
                # the real optimizer.
                mu, sigma = var_dict['grad_free']
                if mu is None:
                    mu = np.zeros(var_dict['shape'])
                if sigma is None:
                    sigma = 1.
            else:
                mu = np.zeros(var_dict['shape'])
                sigma = 1.0

            # NOTE: sigma is currently unused because set_mutation is
            # commented out; kept for when mutation scaling is re-enabled.
            opt_fn = ng.optimizers.registry[self.method]
            p = ng.p.Array(init=mu)  # .set_mutation(sigma=sigma)
            ng_opt = opt_fn(parametrization=p, budget=budget)
            self.ng_optimizers[(var_dict['var_type'], var_name)] = ng_opt

        assert len(self.ng_optimizers.keys()) == 1, \
            'currently only a single input variable can be optimized via ' +\
            'Nevergrad but got: {}'.format(self.ng_optimizers.keys())
        return

    @torch.no_grad()
    def ng_init(self, var_manager, num_samples):
        """
        Draws candidate solutions from the optimizer and writes them into
        freshly initialized variables.

        Args
            var_manager (VariableManger): instance of the variable manager
            num_samples (int): number of samples for mini-batch optimization
        """
        if self.is_sequential:
            # sequential methods only support batch size 1
            vars = var_manager.initialize(num_seeds=1)
            num_samples = 1
        else:
            vars = var_manager.initialize(num_samples=num_samples)

        for (var_type, var_name), ng_opt in self.ng_optimizers.items():
            ng_data = [ng_opt.ask() for _ in range(num_samples)]
            _ng_data = np.concatenate([x.args for x in ng_data])

            for i, d in enumerate(_ng_data):
                vars[var_type][var_name].data[i].data = \
                        torch.Tensor(d).data.type_as(
                                vars[var_type][var_name].data[i].data)

            # keep the asked candidates so ng_update() can tell() the losses
            self._sampled[(var_type, var_name)] = ng_data
        return vars

    @torch.no_grad()
    def ng_update(self, variables, loss=None, inverted_loss=False):
        """
        Updates NG distribution either with the provided loss or loss that
        is recomputed.

        Args:
            variables (dict): a dictionary instance generated from the
                variable manager.
            loss (array or list): a 1-dimensional array or list consisting of
                losses corresponding to each sample. If the loss is not
                provided, uses the variables to recompute the loss.
                [Default: None]
            inverted_loss (bool): if True, the loss is computed after inverting
                the generated images back to the original target. For example
                this is used to compute the loss on the original target.
                [Default: False]
        """

        for (var_type, var_name), ng_opt in self.ng_optimizers.items():
            ng_data = self._sampled[(var_type, var_name)]

            if loss is None:
                out, loss, _ = self.step(variables, optimize=False)

                if inverted_loss and hasattr(variables, 'transform'):
                    # undo the spatial transform and score against the
                    # original (untransformed) target and weight mask
                    target_type = \
                        self.var_manager.variable_info['target']['var_type']
                    weight_type = \
                        self.var_manager.variable_info['weight']['var_type']

                    target = self.var_manager.variable_info['target']['default']
                    weight = self.var_manager.variable_info['weight']['default']

                    target = target.unsqueeze(0).type_as(out)
                    weight = weight.unsqueeze(0).type_as(out)

                    t_fn = self.transform_fns['target']['fn']
                    t_param = torch.stack(variables.transform.t.data)

                    out = t_fn(out, t_param, invert=True)
                    loss = self.loss_fn(out, target, binarize(weight))
                    loss = loss.cpu().detach().numpy()

            # feed each (candidate, loss) pair back into the optimizer
            for d, l in zip(ng_data, loss):
                ng_opt.tell(d, l)
        return
|
normal
|
{
"blob_id": "4a136a6284add3bcbd7f9546e18e79151cea685f",
"index": 623,
"step-1": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n <mask token>\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. 
If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-3": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n\n @torch.no_grad()\n def ng_init(self, var_manager, num_samples):\n \"\"\"\n Args\n var_manager (VariableManger): instance of the variable manager\n num_samples (int): number of samples for mini-batch optimization\n \"\"\"\n if self.is_sequential:\n vars = var_manager.initialize(num_seeds=1)\n num_samples = 1\n else:\n vars = var_manager.initialize(num_samples=num_samples)\n for (var_type, var_name), ng_opt in 
self.ng_optimizers.items():\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\n _ng_data = np.concatenate([x.args for x in ng_data])\n for i, d in enumerate(_ng_data):\n vars[var_type][var_name].data[i].data = torch.Tensor(d\n ).data.type_as(vars[var_type][var_name].data[i].data)\n self._sampled[var_type, var_name] = ng_data\n return vars\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-4": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n \"\"\"\n Base template for NeverGrad optimization. Should be used jointly with\n BaseOptimizer.\n\n For full list of available optimizers\n > https://github.com/facebookresearch/nevergrad\n\n or ...\n > print(self.valid_methods)\n\n Args:\n method: nevergrad optimization method\n\n NOTE:\n nevergrad CMA have been observed to perform wrose than the original\n codebase. use with warning. nevergrad has a perk of being optimized\n in parallel, hence batch-size can be arbitrarily chosen.\n \"\"\"\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 
'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n\n @torch.no_grad()\n def ng_init(self, var_manager, num_samples):\n \"\"\"\n Args\n var_manager (VariableManger): instance of the variable manager\n num_samples (int): number of samples for mini-batch optimization\n \"\"\"\n if self.is_sequential:\n vars = var_manager.initialize(num_seeds=1)\n num_samples = 1\n else:\n vars = var_manager.initialize(num_samples=num_samples)\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\n _ng_data = np.concatenate([x.args for x in ng_data])\n for i, d in enumerate(_ng_data):\n vars[var_type][var_name].data[i].data = torch.Tensor(d\n ).data.type_as(vars[var_type][var_name].data[i].data)\n self._sampled[var_type, var_name] = ng_data\n return vars\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. 
For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-5": "import nevergrad as ng\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom pix2latent.utils.image import binarize\r\n\r\n\r\n\r\nclass _BaseNevergradOptimizer():\r\n \"\"\"\r\n Base template for NeverGrad optimization. Should be used jointly with\r\n BaseOptimizer.\r\n\r\n For full list of available optimizers\r\n > https://github.com/facebookresearch/nevergrad\r\n\r\n or ...\r\n > print(self.valid_methods)\r\n\r\n Args:\r\n method: nevergrad optimization method\r\n\r\n NOTE:\r\n nevergrad CMA have been observed to perform wrose than the original\r\n codebase. use with warning. nevergrad has a perk of being optimized\r\n in parallel, hence batch-size can be arbitrarily chosen.\r\n \"\"\"\r\n\r\n def __init__(self, method):\r\n\r\n self.method = method\r\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\r\n\r\n # this is not an exhaustive list\r\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\r\n self.is_sequential = self.method in self.sequential_methods\r\n\r\n if self.is_sequential:\r\n seq_msg = '{} is a sequential method. 
batch size is set to 1'\r\n cprint(seq_msg.format(self.method), 'y')\r\n\r\n assert self.method in self.valid_methods, \\\r\n f'unknown nevergrad method: {self.method}'\r\n\r\n self.ng_optimizers = {}\r\n self._sampled = {}\r\n return\r\n\r\n\r\n @torch.no_grad()\r\n def setup_ng(self, var_manager, budget):\r\n \"\"\"\r\n initializes NeverGrad optimizer.\r\n\r\n Args\r\n var_manager (VariableManger): instance of the variable manager\r\n budget (int): number of optimization iteration.\r\n \"\"\"\r\n\r\n for var_name, var_dict in var_manager.variable_info.items():\r\n\r\n if var_dict['grad_free'] is False:\r\n continue\r\n\r\n if type(var_dict['grad_free']) == tuple:\r\n mu, sigma = var_dict['grad_free']\r\n\r\n if mu is None:\r\n mu = np.zeros(var_dict['shape'])\r\n\r\n if sigma is None:\r\n sigma = 1.\r\n\r\n cma_opt = CMA(mu, sigma=sigma)\r\n\r\n else:\r\n mu = np.zeros(var_dict['shape'])\r\n sigma = 1.0\r\n\r\n opt_fn = ng.optimizers.registry[self.method]\r\n p = ng.p.Array(init=mu)#.set_mutation(sigma=sigma)\r\n ng_opt = opt_fn(parametrization=p, budget=budget)\r\n\r\n self.ng_optimizers[(var_dict['var_type'], var_name)] = ng_opt\r\n\r\n assert len(self.ng_optimizers.keys()) == 1, \\\r\n 'currently only a single input variable can be optimized via '+\\\r\n 'Nevergrad but got: {}'.format(self.ng_optimizers.keys())\r\n return\r\n\r\n\r\n @torch.no_grad()\r\n def ng_init(self, var_manager, num_samples):\r\n \"\"\"\r\n Args\r\n var_manager (VariableManger): instance of the variable manager\r\n num_samples (int): number of samples for mini-batch optimization\r\n \"\"\"\r\n if self.is_sequential:\r\n vars = var_manager.initialize(num_seeds=1)\r\n num_samples = 1\r\n else:\r\n vars = var_manager.initialize(num_samples=num_samples)\r\n\r\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\r\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\r\n\r\n _ng_data = np.concatenate([x.args for x in ng_data])\r\n\r\n for i, d in enumerate(_ng_data):\r\n 
vars[var_type][var_name].data[i].data = \\\r\n torch.Tensor(d).data.type_as(\r\n vars[var_type][var_name].data[i].data)\r\n\r\n self._sampled[(var_type, var_name)] = ng_data\r\n\r\n return vars\r\n\r\n\r\n @torch.no_grad()\r\n def ng_update(self, variables, loss=None, inverted_loss=False):\r\n\r\n \"\"\"\r\n Updates NG distribution either with the provided loss or loss that\r\n is recomputed.\r\n\r\n Args:\r\n variables (dict): a dictionary instance generated from the\r\n variable manager.\r\n loss (array or list): a 1-dimensional array or list consisting of\r\n losses corresponding to each sample. If the loss is not\r\n provided, uses the variables to recompute the loss.\r\n [Default: None]\r\n inverted_loss (bool): if True, the loss is computed after inverting\r\n the generated images back to the original target. For example\r\n this is used to compute the loss on the original target.\r\n [Default: False]\r\n \"\"\"\r\n\r\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\r\n\r\n ng_data = self._sampled[(var_type, var_name)]\r\n\r\n if loss is None:\r\n out, loss, _ = self.step(variables, optimize=False)\r\n\r\n if inverted_loss and hasattr(variables, 'transform'):\r\n\r\n target_type = \\\r\n self.var_manager.variable_info['target']['var_type']\r\n weight_type = \\\r\n self.var_manager.variable_info['weight']['var_type']\r\n\r\n target = self.var_manager.variable_info['target']['default']\r\n weight = self.var_manager.variable_info['weight']['default']\r\n\r\n target = target.unsqueeze(0).type_as(out)\r\n weight = weight.unsqueeze(0).type_as(out)\r\n\r\n t_fn = self.transform_fns['target']['fn']\r\n t_param = torch.stack(variables.transform.t.data)\r\n out = t_fn(out, t_param, invert=True)\r\n\r\n loss = self.loss_fn(out, target, binarize(weight))\r\n loss = loss.cpu().detach().numpy()\r\n\r\n for d, l in zip(ng_data, loss):\r\n ng_opt.tell(d, l)\r\n\r\n return\r\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
assert len(sys.argv
) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'
s1_file = sys.argv[1]
s2_file = sys.argv[2]
with open(s1_file, 'r') as f1:
s1 = json.load(f1)
with open(s2_file, 'r') as f2:
s2 = json.load(f2)
print('LHS <: RHS', isSubschema(s1, s2))
print('RHS <: LHS', isSubschema(s2, s1))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
assert len(sys.argv
) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'
s1_file = sys.argv[1]
s2_file = sys.argv[2]
with open(s1_file, 'r') as f1:
s1 = json.load(f1)
with open(s2_file, 'r') as f2:
s2 = json.load(f2)
print('LHS <: RHS', isSubschema(s1, s2))
print('RHS <: LHS', isSubschema(s2, s1))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
import jsonref
import sys
from jsonsubschema.api import isSubschema
def main():
assert len(sys.argv
) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'
s1_file = sys.argv[1]
s2_file = sys.argv[2]
with open(s1_file, 'r') as f1:
s1 = json.load(f1)
with open(s2_file, 'r') as f2:
s2 = json.load(f2)
print('LHS <: RHS', isSubschema(s1, s2))
print('RHS <: LHS', isSubschema(s2, s1))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
'''
Created on June 24, 2019
@author: Andrew Habib
'''
import json
import jsonref
import sys
from jsonsubschema.api import isSubschema
def main():
    """CLI entry point: load two JSON schemas and report subtype relations.

    Usage: prog LHS_SCHEMA RHS_SCHEMA

    Reads both files as plain JSON (``$ref`` resolution via jsonref is
    intentionally disabled — see the commented-out lines) and prints whether
    each schema is a subschema of the other.

    Raises:
        SystemExit: with exit code 2 if the wrong number of arguments
            is supplied.
    """
    # `assert` is stripped under `python -O`, so validate argv explicitly.
    if len(sys.argv) != 3:
        print("jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema",
              file=sys.stderr)
        sys.exit(2)

    s1_file = sys.argv[1]
    s2_file = sys.argv[2]

    with open(s1_file, 'r') as f1:
        s1 = json.load(f1)
        # s1 = jsonref.load(f1)  # enable to resolve $ref pointers
    with open(s2_file, 'r') as f2:
        s2 = json.load(f2)
        # s2 = jsonref.load(f2)

    print("LHS <: RHS", isSubschema(s1, s2))
    print("RHS <: LHS", isSubschema(s2, s1))


if __name__ == "__main__":

    main()
|
flexible
|
{
"blob_id": "ba78a1e29736c4f109a0efc6f5b9993994661058",
"index": 3527,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n assert len(sys.argv\n ) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n print('LHS <: RHS', isSubschema(s1, s2))\n print('RHS <: LHS', isSubschema(s2, s1))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n assert len(sys.argv\n ) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n print('LHS <: RHS', isSubschema(s1, s2))\n print('RHS <: LHS', isSubschema(s2, s1))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport json\nimport jsonref\nimport sys\nfrom jsonsubschema.api import isSubschema\n\n\ndef main():\n assert len(sys.argv\n ) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n print('LHS <: RHS', isSubschema(s1, s2))\n print('RHS <: LHS', isSubschema(s2, s1))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nCreated on June 24, 2019\n@author: Andrew Habib\n'''\n\nimport json\nimport jsonref\nimport sys\n\nfrom jsonsubschema.api import isSubschema\n\n\ndef main():\n\n assert len(\n sys.argv) == 3, \"jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema\"\n\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n # s1 = jsonref.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n # s2 = jsonref.load(f2)\n\n print(\"LHS <: RHS\", isSubschema(s1, s2))\n print(\"RHS <: LHS\", isSubschema(s2, s1))\n\n\nif __name__ == \"__main__\":\n\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from eboss_qso.fits.joint import run_joint_mcmc_fit
from eboss_qso.measurements.utils import make_hash
import os.path as osp
import os
from glob import glob
# Fit configurations: each entry is (z_weighted, p) — whether the sample is
# redshift-weighted and the weight exponent p applied when selecting the data.
# NOTE(review): p presumably parameterizes the redshift weighting scheme —
# confirm against the eboss_qso measurement pipeline.
ARGS = [(False, 1.0),
        (False, 1.6),
        (True, 1.6),
        (True, 1.0)
        ]
# MCMC sampler configuration: chain length and number of walkers per chain.
ITERATIONS = 500
WALKERS = 100
def main(argnum, kmin):
    """Run a joint N+S MCMC fit for the configuration selected by ``argnum``.

    Parameters
    ----------
    argnum : int
        index into the module-level ``ARGS`` list selecting which
        (z_weighted, p) combination to fit
    kmin : str
        the minimum wavenumber of the fitting range, e.g. '0.0001'
    """
    z_weighted, p = ARGS[argnum]

    # the data selection passed down to the fitting driver
    # (key order matters for the hash, so keep it stable)
    kws = {
        'version': 'v1.9f',
        'krange': f'{kmin}-0.3',
        'params': 'basemodel-N-fnl',
        'zrange': '0.8-2.2',
        'z_weighted': z_weighted,
        'p': p,
        'ells': [0],
    }

    hashstr = make_hash(kws)

    # output directory, keyed by the data selection
    outdir = osp.join(os.environ['EBOSS_FITS'], 'data', kws['version'],
                      kws['krange'], kws['params'], kws['zrange'],
                      'QSO-N+S-%s' % hashstr)
    if not osp.exists(outdir):
        os.makedirs(outdir)

    # number the new chain file after any chains already present
    nchains = len(glob(osp.join(outdir, '*npz')))
    chain_file = osp.join(
        outdir, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, nchains))
    print(chain_file)

    # run the joint fit, tying f_nl across the N and S samples
    run_joint_mcmc_fit('data', ITERATIONS, WALKERS,
                       chain_file, kws, joint_params=['f_nl'])
if __name__ == '__main__':
    from argparse import ArgumentParser
    # CLI: pick one of the four ARGS configurations and the minimum
    # wavenumber of the fitting range (validated against the two supported
    # values).
    parser = ArgumentParser()
    parser.add_argument("argnum", type=int, choices=[0, 1, 2, 3])
    parser.add_argument('kmin', type=str, choices=["0.0001", "0.005"])
    ns = parser.parse_args()
    main(ns.argnum, ns.kmin)
|
normal
|
{
"blob_id": "a40c87fe4b805495e5bd30155faa861cbe16c368",
"index": 6123,
"step-1": "<mask token>\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('argnum', type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=['0.0001', '0.005'])\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-3": "<mask token>\nARGS = [(False, 1.0), (False, 1.6), (True, 1.6), (True, 1.0)]\nITERATIONS = 500\nWALKERS = 100\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('argnum', type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=['0.0001', '0.005'])\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-4": "from eboss_qso.fits.joint import run_joint_mcmc_fit\nfrom eboss_qso.measurements.utils import make_hash\nimport os.path as osp\nimport os\nfrom glob import glob\nARGS = [(False, 1.0), (False, 1.6), (True, 1.6), (True, 1.0)]\nITERATIONS = 500\nWALKERS = 100\n\n\ndef main(argnum, kmin):\n z_weighted, p = ARGS[argnum]\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n hashstr = make_hash(kws)\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'], kws['krange'], kws['params'],\n kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n if not osp.exists(output):\n os.makedirs(output)\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS, output, kws,\n joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('argnum', type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=['0.0001', '0.005'])\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-5": "from eboss_qso.fits.joint import run_joint_mcmc_fit\nfrom eboss_qso.measurements.utils import make_hash\nimport os.path as osp\nimport os\nfrom glob import glob\n\n\nARGS = [(False, 1.0),\n (False, 1.6),\n (True, 1.6),\n (True, 1.0)\n ]\nITERATIONS = 500\nWALKERS = 100\n\n\ndef main(argnum, kmin):\n\n z_weighted, p = ARGS[argnum]\n\n # the data to load\n kws = {}\n kws['version'] = 'v1.9f'\n kws['krange'] = '%s-0.3' % kmin\n kws['params'] = 'basemodel-N-fnl'\n kws['zrange'] = '0.8-2.2'\n kws['z_weighted'] = z_weighted\n kws['p'] = p\n kws['ells'] = [0]\n\n hashstr = make_hash(kws)\n\n # output directory\n output = osp.join(os.environ['EBOSS_FITS'], 'data')\n output = osp.join(output, kws['version'],\n kws['krange'], kws['params'], kws['zrange'])\n output = osp.join(output, 'QSO-N+S-%s' % hashstr)\n\n if not osp.exists(output):\n os.makedirs(output)\n\n # output file name\n i = len(glob(osp.join(output, '*npz')))\n output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, i))\n print(output)\n\n # run\n run_joint_mcmc_fit('data', ITERATIONS, WALKERS,\n output, kws, joint_params=['f_nl'])\n\n\nif __name__ == '__main__':\n\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument(\"argnum\", type=int, choices=[0, 1, 2, 3])\n parser.add_argument('kmin', type=str, choices=[\"0.0001\", \"0.005\"])\n\n ns = parser.parse_args()\n main(ns.argnum, ns.kmin)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_required
@csrf_exempt
def social(request):
if request.method == 'POST':
data = request.POST
project_id = int(json.loads(data.get('projid')))
head = data.get('head')
head = json.loads(head)
subhead = json.loads(data.get('subh'))
content = json.loads(data.get('cont'))
obtained = json.loads(data.get('pass'))
with connection.cursor() as curr:
curr.execute(
'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'
, [project_id])
rec_id = namedtuplefetchall(curr)
manager_id = rec_id[0].manager_id
customer_id = rec_id[0].customer_id
print('SENDING')
with connection.cursor() as curr:
curr.execute('select contact from customer where customer_id = %s',
[customer_id])
email = namedtuplefetchall(curr)
customer_email = email[0].contact
pwd = settings.EMAIL_HOST_PASSWORD
if encrypto.verify(obtained, pwd) == True:
send_mail(head, subhead + '\n' + content, 'Gauri Baraskar',
'gauribaraskar812@gmail.com', settings.EMAIL_HOST_USER,
obtained)
else:
messages.warning(request, 'Wrong Password Entered')
return JsonResponse(1, safe=False)
else:
with connection.cursor() as curr:
curr.execute(
'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'
, [request.user.id])
res = namedtuplefetchall(curr)
return render(request, 'social/index.html', {'social': res})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from projects.models import Project
from django.db import connection
from .utils import namedtuplefetchall
from django.http import JsonResponse
from django.contrib import messages
import json
from django.views.decorators.csrf import csrf_exempt
from .utils import send_mail
from DBMS import settings
from passlib.hash import pbkdf2_sha256 as encrypto
@login_required
@csrf_exempt
def social(request):
if request.method == 'POST':
data = request.POST
project_id = int(json.loads(data.get('projid')))
head = data.get('head')
head = json.loads(head)
subhead = json.loads(data.get('subh'))
content = json.loads(data.get('cont'))
obtained = json.loads(data.get('pass'))
with connection.cursor() as curr:
curr.execute(
'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'
, [project_id])
rec_id = namedtuplefetchall(curr)
manager_id = rec_id[0].manager_id
customer_id = rec_id[0].customer_id
print('SENDING')
with connection.cursor() as curr:
curr.execute('select contact from customer where customer_id = %s',
[customer_id])
email = namedtuplefetchall(curr)
customer_email = email[0].contact
pwd = settings.EMAIL_HOST_PASSWORD
if encrypto.verify(obtained, pwd) == True:
send_mail(head, subhead + '\n' + content, 'Gauri Baraskar',
'gauribaraskar812@gmail.com', settings.EMAIL_HOST_USER,
obtained)
else:
messages.warning(request, 'Wrong Password Entered')
return JsonResponse(1, safe=False)
else:
with connection.cursor() as curr:
curr.execute(
'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'
, [request.user.id])
res = namedtuplefetchall(curr)
return render(request, 'social/index.html', {'social': res})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
from projects.models import Project
from django.db import connection
from .utils import namedtuplefetchall
from django.http import JsonResponse
from django.contrib import messages
import json
from django.views.decorators.csrf import csrf_exempt
from .utils import send_mail
from DBMS import settings
from passlib.hash import pbkdf2_sha256 as encrypto
# Create your views here.
@login_required
@csrf_exempt
def social(request):
    """Social-media promotion view.

    GET: render ``social/index.html`` listing the projects the logged-in
    user works on (queried via raw SQL on ``works_on``/``project``).

    POST: compose and send a promotional e-mail for the selected project.
    The form carries JSON-encoded fields ``projid``, ``head``, ``subh``,
    ``cont`` and ``pass`` (the mail-account password, re-entered by the
    user as a confirmation step and verified against
    ``settings.EMAIL_HOST_PASSWORD``).

    Returns:
        JsonResponse(1) on POST (whether or not the mail was sent), or the
        rendered project list on GET.
    """
    if request.method == "POST":
        data = request.POST
        # All form fields arrive JSON-encoded.
        project_id = int(json.loads(data.get('projid')))
        head = json.loads(data.get('head'))
        subhead = json.loads(data.get('subh'))
        content = json.loads(data.get('cont'))
        obtained = json.loads(data.get('pass'))

        # Look up which customer the selected project targets.
        with connection.cursor() as curr:
            curr.execute("SELECT manager_id,customer_id FROM socialMedia where project_id=%s", [project_id])
            rec_id = namedtuplefetchall(curr)
        customer_id = rec_id[0].customer_id
        print("SENDING")

        with connection.cursor() as curr:
            curr.execute("select contact from customer where customer_id = %s", [customer_id])
            email = namedtuplefetchall(curr)
        # Rename the email field with customer_email to send to customers
        # when we have actual data.
        customer_email = email[0].contact

        # Only send if the user re-typed the mail-account password correctly.
        pwd = settings.EMAIL_HOST_PASSWORD
        if encrypto.verify(obtained, pwd):
            send_mail(head, subhead + '\n' + content, 'Gauri Baraskar',
                      'gauribaraskar812@gmail.com', settings.EMAIL_HOST_USER,
                      obtained)
        else:
            messages.warning(request, "Wrong Password Entered")
        return JsonResponse(1, safe=False)
    else:
        # GET: list the projects this user is assigned to.
        with connection.cursor() as curr:
            curr.execute("select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id", [request.user.id])
            res = namedtuplefetchall(curr)
        return render(request, 'social/index.html', {'social': res})
|
flexible
|
{
"blob_id": "c2839046592469dfae7526f72be947126960ba19",
"index": 621,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@login_required\n@csrf_exempt\ndef social(request):\n if request.method == 'POST':\n data = request.POST\n project_id = int(json.loads(data.get('projid')))\n head = data.get('head')\n head = json.loads(head)\n subhead = json.loads(data.get('subh'))\n content = json.loads(data.get('cont'))\n obtained = json.loads(data.get('pass'))\n with connection.cursor() as curr:\n curr.execute(\n 'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'\n , [project_id])\n rec_id = namedtuplefetchall(curr)\n manager_id = rec_id[0].manager_id\n customer_id = rec_id[0].customer_id\n print('SENDING')\n with connection.cursor() as curr:\n curr.execute('select contact from customer where customer_id = %s',\n [customer_id])\n email = namedtuplefetchall(curr)\n customer_email = email[0].contact\n pwd = settings.EMAIL_HOST_PASSWORD\n if encrypto.verify(obtained, pwd) == True:\n send_mail(head, subhead + '\\n' + content, 'Gauri Baraskar',\n 'gauribaraskar812@gmail.com', settings.EMAIL_HOST_USER,\n obtained)\n else:\n messages.warning(request, 'Wrong Password Entered')\n return JsonResponse(1, safe=False)\n else:\n with connection.cursor() as curr:\n curr.execute(\n 'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'\n , [request.user.id])\n res = namedtuplefetchall(curr)\n return render(request, 'social/index.html', {'social': res})\n",
"step-3": "from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom projects.models import Project\nfrom django.db import connection\nfrom .utils import namedtuplefetchall\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .utils import send_mail\nfrom DBMS import settings\nfrom passlib.hash import pbkdf2_sha256 as encrypto\n\n\n@login_required\n@csrf_exempt\ndef social(request):\n if request.method == 'POST':\n data = request.POST\n project_id = int(json.loads(data.get('projid')))\n head = data.get('head')\n head = json.loads(head)\n subhead = json.loads(data.get('subh'))\n content = json.loads(data.get('cont'))\n obtained = json.loads(data.get('pass'))\n with connection.cursor() as curr:\n curr.execute(\n 'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'\n , [project_id])\n rec_id = namedtuplefetchall(curr)\n manager_id = rec_id[0].manager_id\n customer_id = rec_id[0].customer_id\n print('SENDING')\n with connection.cursor() as curr:\n curr.execute('select contact from customer where customer_id = %s',\n [customer_id])\n email = namedtuplefetchall(curr)\n customer_email = email[0].contact\n pwd = settings.EMAIL_HOST_PASSWORD\n if encrypto.verify(obtained, pwd) == True:\n send_mail(head, subhead + '\\n' + content, 'Gauri Baraskar',\n 'gauribaraskar812@gmail.com', settings.EMAIL_HOST_USER,\n obtained)\n else:\n messages.warning(request, 'Wrong Password Entered')\n return JsonResponse(1, safe=False)\n else:\n with connection.cursor() as curr:\n curr.execute(\n 'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'\n , [request.user.id])\n res = namedtuplefetchall(curr)\n return render(request, 'social/index.html', {'social': res})\n",
"step-4": "from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\nfrom projects.models import Project\nfrom django.db import connection\nfrom .utils import namedtuplefetchall\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .utils import send_mail\n\nfrom DBMS import settings\n\nfrom passlib.hash import pbkdf2_sha256 as encrypto\n\n# Create your views here.\n\n@login_required\n@csrf_exempt\ndef social(request):\n if request.method == \"POST\":\n data = request.POST\n project_id = int(json.loads(data.get('projid')))\n head = data.get('head')\n head = json.loads(head)\n subhead = json.loads(data.get('subh'))\n content = json.loads(data.get('cont'))\n obtained = json.loads(data.get('pass'))\n with connection.cursor() as curr:\n curr.execute(\"SELECT manager_id,customer_id FROM socialMedia where project_id=%s\",[project_id])\n rec_id = namedtuplefetchall(curr)\n manager_id = rec_id[0].manager_id\n customer_id = rec_id[0].customer_id\n print(\"SENDING\")\n\n with connection.cursor() as curr:\n curr.execute(\"select contact from customer where customer_id = %s\",[customer_id])\n email = namedtuplefetchall(curr)\n customer_email = email[0].contact\n\n # Rename the email field with customer_email to send to customers when we have actual data\n\n pwd = settings.EMAIL_HOST_PASSWORD\n if encrypto.verify(obtained,pwd) == True:\n #print(\"asjdhasd\")\n send_mail(head,subhead+'\\n'+content,'Gauri Baraskar','gauribaraskar812@gmail.com',settings.EMAIL_HOST_USER,obtained)\n else:\n messages.warning(request,\"Wrong Password Entered\")\n return JsonResponse(1,safe=False)\n\n else:\n with connection.cursor() as curr:\n curr.execute(\"select project.project_id,project_name from works_on,project where user_id=%s and 
project.project_id=works_on.project_id\",[request.user.id])\n res = namedtuplefetchall(curr)\n return render(request, 'social/index.html', {'social': res})\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.2.5 on 2019-10-09 12:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Initial schema for the core app: Product tree, Ticket, Task,
    Comment and Attachment tables."""

    # First migration of this app.
    initial = True

    dependencies = [
        # Comment.user references the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0002_customer_employee_lead_manager'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                # lft/rght/tree_id/level are django-mptt's internal
                # nested-set bookkeeping columns (editable=False).
                ('lft', models.PositiveIntegerField(editable=False)),
                ('rght', models.PositiveIntegerField(editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(editable=False)),
                # Self-referential tree link to the parent Product node.
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core.Product')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('created', models.DateTimeField()),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='core.Product')),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('estimated', models.DateTimeField()),
                ('reported', models.DateTimeField()),
                ('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='users.Employee')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='core.Ticket')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Attachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # upload_to='' stores attachments at MEDIA_ROOT directly.
                ('file', models.FileField(upload_to='')),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='core.Ticket')),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "5485fe4f612ededc11e3a96dfd546e97a56cbe2a",
"index": 3316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('users', '0002_customer_employee_lead_manager')]\n operations = [migrations.CreateModel(name='Product', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 255, unique=True)), ('lft', models.PositiveIntegerField(editable=\n False)), ('rght', models.PositiveIntegerField(editable=False)), (\n 'tree_id', models.PositiveIntegerField(db_index=True, editable=\n False)), ('level', models.PositiveIntegerField(editable=False)), (\n 'parent', mptt.fields.TreeForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'children', to='core.Product'))], options={'abstract': False}),\n migrations.CreateModel(name='Ticket', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('name', models.CharField(max_length=255)), (\n 'description', models.CharField(max_length=255)), ('state', models.\n CharField(max_length=255)), ('created', models.DateTimeField()), (\n 'product', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='tickets', to='core.Product'))]), migrations.\n CreateModel(name='Task', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('description', models.CharField(max_length=255)), ('state',\n models.CharField(max_length=255)), ('estimated', models.\n DateTimeField()), ('reported', models.DateTimeField()), ('employee',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='tasks', to='users.Employee'))]), migrations.\n CreateModel(name='Comment', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('text', models.TextField()), ('ticket', 
models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to='core.Ticket')), ('user', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to=settings.AUTH_USER_MODEL))]), migrations.CreateModel\n (name='Attachment', fields=[('id', models.AutoField(auto_created=\n True, primary_key=True, serialize=False, verbose_name='ID')), (\n 'name', models.CharField(max_length=255)), ('file', models.\n FileField(upload_to='')), ('ticket', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='attachments', to=\n 'core.Ticket'))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('users', '0002_customer_employee_lead_manager')]\n operations = [migrations.CreateModel(name='Product', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 255, unique=True)), ('lft', models.PositiveIntegerField(editable=\n False)), ('rght', models.PositiveIntegerField(editable=False)), (\n 'tree_id', models.PositiveIntegerField(db_index=True, editable=\n False)), ('level', models.PositiveIntegerField(editable=False)), (\n 'parent', mptt.fields.TreeForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'children', to='core.Product'))], options={'abstract': False}),\n migrations.CreateModel(name='Ticket', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('name', models.CharField(max_length=255)), (\n 'description', models.CharField(max_length=255)), ('state', models.\n CharField(max_length=255)), ('created', models.DateTimeField()), (\n 'product', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='tickets', to='core.Product'))]), migrations.\n CreateModel(name='Task', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('description', models.CharField(max_length=255)), ('state',\n models.CharField(max_length=255)), ('estimated', models.\n DateTimeField()), ('reported', models.DateTimeField()), ('employee',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='tasks', to='users.Employee'))]), migrations.\n CreateModel(name='Comment', fields=[('id', models.AutoField(\n 
auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('text', models.TextField()), ('ticket', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to='core.Ticket')), ('user', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to=settings.AUTH_USER_MODEL))]), migrations.CreateModel\n (name='Attachment', fields=[('id', models.AutoField(auto_created=\n True, primary_key=True, serialize=False, verbose_name='ID')), (\n 'name', models.CharField(max_length=255)), ('file', models.\n FileField(upload_to='')), ('ticket', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='attachments', to=\n 'core.Ticket'))])]\n",
"step-5": "# Generated by Django 2.2.5 on 2019-10-09 12:06\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('users', '0002_customer_employee_lead_manager'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255, unique=True)),\n ('lft', models.PositiveIntegerField(editable=False)),\n ('rght', models.PositiveIntegerField(editable=False)),\n ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),\n ('level', models.PositiveIntegerField(editable=False)),\n ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core.Product')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Ticket',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('description', models.CharField(max_length=255)),\n ('state', models.CharField(max_length=255)),\n ('created', models.DateTimeField()),\n ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='core.Product')),\n ],\n ),\n migrations.CreateModel(\n name='Task',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('description', models.CharField(max_length=255)),\n ('state', models.CharField(max_length=255)),\n ('estimated', models.DateTimeField()),\n ('reported', models.DateTimeField()),\n ('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', 
to='users.Employee')),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('text', models.TextField()),\n ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='core.Ticket')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Attachment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('file', models.FileField(upload_to='')),\n ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='core.Ticket')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution:
    def merge(self, nums1, m, nums2, n):
        """
        Merge nums2 (length n) into nums1 (whose first m entries are the
        valid sorted values and which has room for m + n entries total).

        Do not return anything, modify nums1 in-place instead.
        """
        # Fill from the back so the still-unmerged prefix of nums1 is
        # never overwritten. (The previous version built a local list and
        # rebound the name `nums1 = ans`, which left the caller's list
        # unchanged; its "shift" branch was also incorrect.)
        i = m - 1          # last valid element of nums1
        j = n - 1          # last element of nums2
        k = m + n - 1      # next slot to fill in nums1
        while j >= 0:
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
            k -= 1
        # If j hit -1 first, the remaining nums1 prefix is already in place.
if __name__ == "__main__":
    # Quick manual check: merge [2, 5, 6] into [1, 2, 3, _, _, _].
    solver = Solution()
    first = [1, 2, 3, 0, 0, 0]
    second = [2, 5, 6]
    solver.merge(first, 3, second, 3)
    print(first)
|
normal
|
{
"blob_id": "4f13e2858d9cf469f14026808142886e5c3fcc85",
"index": 28,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if n == 0:\n nums1 = nums1\n if nums1[m - 1] <= nums2[0]:\n for i in range(n):\n nums1[m + i] = nums2[i]\n elif nums1[0] >= nums2[-1]:\n for i in range(m):\n nums1[i] = nums1[n + i]\n else:\n ans = [None] * len(nums1)\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print('take 1: ', nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print('take 2: ', nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n nums1 = ans\n\n\n<mask token>\n",
"step-4": "class Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if n == 0:\n nums1 = nums1\n if nums1[m - 1] <= nums2[0]:\n for i in range(n):\n nums1[m + i] = nums2[i]\n elif nums1[0] >= nums2[-1]:\n for i in range(m):\n nums1[i] = nums1[n + i]\n else:\n ans = [None] * len(nums1)\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print('take 1: ', nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print('take 2: ', nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n nums1 = ans\n\n\nif __name__ == '__main__':\n solve = Solution()\n nums1 = [1, 2, 3, 0, 0, 0]\n m = 3\n nums2 = [2, 5, 6]\n n = 3\n solve.merge(nums1, m, nums2, n)\n print(nums1)\n",
"step-5": "class Solution:\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n \n if n == 0:\n nums1 = nums1\n if nums1[m-1] <= nums2[0]:\n \n for i in range(n):\n nums1[m+i] = nums2[i]\n \n elif nums1[0] >= nums2[-1]:\n \n for i in range(m):\n nums1[i] = nums1[n+i]\n else:\n ans = [None]*len(nums1)\n i = 0\n j = 0\n k = 0\n \n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n print(\"take 1: \", nums1[i])\n ans[k] = nums1[i]\n i += 1\n else:\n print(\"take 2: \", nums2[j])\n ans[k] = nums2[j]\n j += 1\n k += 1\n\n nums1 = ans\n\nif __name__ == \"__main__\":\n solve = Solution()\n nums1 = [1,2,3,0,0,0]\n m = 3\n nums2 = [2,5,6]\n n = 3\n solve.merge(nums1, m, nums2, n)\n print(nums1)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def primeiras_ocorrencias(str):
    """Map each character of the string to the index of its first occurrence."""
    primeiras = {}
    for indice, caractere in enumerate(str):
        # setdefault only stores the index the first time a char is seen.
        primeiras.setdefault(caractere, indice)
    return primeiras
|
flexible
|
{
"blob_id": "bb1a6815649eb9e79e2ab1e110ea8acd8adce5aa",
"index": 3379,
"step-1": "<mask token>\n",
"step-2": "def primeiras_ocorrencias(str):\n dic = {}\n for i, letra in enumerate(str):\n if letra not in dic:\n dic[letra] = i\n return dic\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 18:05:44 2018

@author: Administrator

Cross-validation and grid-search demo on the iris dataset: scores a
LogisticRegression baseline with 10-fold CV, then tunes an SVC both by
a hand-rolled grid search and by sklearn's GridSearchCV.
"""

from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV

iris = load_iris()
log_reg = LogisticRegression()

# Baseline: mean accuracy of logistic regression over 10 CV folds.
score = cross_val_score(log_reg, iris.data, iris.target,cv=10)
print("cross-vali score is: {}".format(score.mean()))

import mglearn
#mglearn.plots.plot_stratified_cross_validation()

# Stratified 5-fold splitter; print the train/test index arrays per fold.
kfold = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in kfold.split(iris.data, iris.target):
    print(train_index, test_index)

from sklearn.svm import SVC
def simple_grid(iris, kfold):
    """Manual grid search of SVC hyper-parameters via cross-validation.

    Tries every (gamma, C) pair from a small logarithmic grid, scores each
    candidate with cross_val_score on the full iris data, and reports the
    best combination.

    Parameters
    ----------
    iris : sklearn Bunch exposing .data and .target arrays.
    kfold : cross-validation splitter passed through to cross_val_score.

    Returns
    -------
    dict with keys 'C' and 'gamma' holding the best-scoring values.
    """
    best_score = 0
    best_para = {}
    para_list = [0.001, 0.01, 0.1, 1, 10]
    for gamma in para_list:
        for C in para_list:
            svm = SVC(gamma=gamma, C=C)
            score = cross_val_score(svm, iris.data, iris.target, cv=kfold).mean()
            if score > best_score:
                best_score = score
                best_para = {'C': C, 'gamma': gamma}
    print("best score is {:.2f}".format(best_score))
    print("best parameters is {}".format(best_para))
    # Re-evaluate the *best* model. The previous code scored whatever SVC
    # instance was left over from the final loop iteration (gamma=10, C=10).
    score = cross_val_score(SVC(**best_para), iris.data, iris.target, cv=kfold)
    print("CV-score is {}".format(score.mean(0)))
    return best_para
# Run the manual search, then the equivalent sklearn GridSearchCV.
para = simple_grid(iris, kfold)

para_grid = {"C": [0.001, 0.01, 0.1, 1, 10],
             'gamma': [0.001, 0.01, 0.1, 1, 10]}
grid_search = GridSearchCV(SVC(), para_grid, cv=kfold)
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.3, random_state=0)

grid_search.fit(X_train, y_train)
print("best grid score is {:.2f}".format(grid_search.score(X_test, y_test)))

import pandas as pd
results = pd.DataFrame(grid_search.cv_results_)
# display() only exists inside IPython/Jupyter; print works everywhere.
print(results.head())

# Nested CV: an outer cross_val_score around an inner GridSearchCV.
print(cross_val_score(GridSearchCV(SVC(), para_grid, cv=kfold),
                      X_train, y_train, cv=kfold).mean())
# predict() takes features only; the old call passed y_test as a second
# positional argument, which raised a TypeError.
y_pred = grid_search.predict(X_test)

from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
|
normal
|
{
"blob_id": "aaa0ac5e31e2c10b5baba6077e952fff1a92ef82",
"index": 882,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('cross-vali score is: {}'.format(score.mean()))\n<mask token>\nfor train_index, test_index in kfold.split(iris.data, iris.target):\n print(train_index, test_index)\n<mask token>\n\n\ndef simple_grid(iris, kfold):\n X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.\n target, test_size=0.3, random_state=0)\n best_score = 0\n para_list = [0.001, 0.01, 0.1, 1, 10]\n for gamma in para_list:\n for C in para_list:\n svm = SVC(gamma=gamma, C=C)\n scores = cross_val_score(svm, iris.data, iris.target, cv=kfold)\n score = scores.mean()\n if score > best_score:\n best_score = score\n best_para = {'C': C, 'gamma': gamma}\n print('best score is {:.2f}'.format(best_score))\n print('best parameters is {}'.format(best_para))\n score = cross_val_score(svm, iris.data, iris.target, cv=kfold)\n print('CV-score is {}'.format(score.mean(0)))\n return best_para\n\n\n<mask token>\ngrid_search.fit(X_train, y_train)\nprint('best grid score is {:.2f}'.format(grid_search.score(X_test, y_test)))\n<mask token>\ndisplay(results.head())\nprint(cross_val_score(GridSearchCV(SVC(), para_grid, cv=kfold), X_train,\n y_train, cv=kfold).mean())\n<mask token>\nprint(classification_report(y_test, y_pred))\n",
"step-3": "<mask token>\niris = load_iris()\nlog_reg = LogisticRegression()\nscore = cross_val_score(log_reg, iris.data, iris.target, cv=10)\nprint('cross-vali score is: {}'.format(score.mean()))\n<mask token>\nkfold = StratifiedKFold(n_splits=5, shuffle=True)\nfor train_index, test_index in kfold.split(iris.data, iris.target):\n print(train_index, test_index)\n<mask token>\n\n\ndef simple_grid(iris, kfold):\n X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.\n target, test_size=0.3, random_state=0)\n best_score = 0\n para_list = [0.001, 0.01, 0.1, 1, 10]\n for gamma in para_list:\n for C in para_list:\n svm = SVC(gamma=gamma, C=C)\n scores = cross_val_score(svm, iris.data, iris.target, cv=kfold)\n score = scores.mean()\n if score > best_score:\n best_score = score\n best_para = {'C': C, 'gamma': gamma}\n print('best score is {:.2f}'.format(best_score))\n print('best parameters is {}'.format(best_para))\n score = cross_val_score(svm, iris.data, iris.target, cv=kfold)\n print('CV-score is {}'.format(score.mean(0)))\n return best_para\n\n\npara = simple_grid(iris, kfold)\npara_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1, 10]\n }\ngrid_search = GridSearchCV(SVC(), para_grid, cv=kfold)\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,\n test_size=0.3, random_state=0)\ngrid_search.fit(X_train, y_train)\nprint('best grid score is {:.2f}'.format(grid_search.score(X_test, y_test)))\n<mask token>\nresults = pd.DataFrame(grid_search.cv_results_)\ndisplay(results.head())\nprint(cross_val_score(GridSearchCV(SVC(), para_grid, cv=kfold), X_train,\n y_train, cv=kfold).mean())\ny_pred = grid_search.predict(X_test, y_test)\n<mask token>\nprint(classification_report(y_test, y_pred))\n",
"step-4": "<mask token>\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import GridSearchCV\niris = load_iris()\nlog_reg = LogisticRegression()\nscore = cross_val_score(log_reg, iris.data, iris.target, cv=10)\nprint('cross-vali score is: {}'.format(score.mean()))\nimport mglearn\nkfold = StratifiedKFold(n_splits=5, shuffle=True)\nfor train_index, test_index in kfold.split(iris.data, iris.target):\n print(train_index, test_index)\nfrom sklearn.svm import SVC\n\n\ndef simple_grid(iris, kfold):\n X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.\n target, test_size=0.3, random_state=0)\n best_score = 0\n para_list = [0.001, 0.01, 0.1, 1, 10]\n for gamma in para_list:\n for C in para_list:\n svm = SVC(gamma=gamma, C=C)\n scores = cross_val_score(svm, iris.data, iris.target, cv=kfold)\n score = scores.mean()\n if score > best_score:\n best_score = score\n best_para = {'C': C, 'gamma': gamma}\n print('best score is {:.2f}'.format(best_score))\n print('best parameters is {}'.format(best_para))\n score = cross_val_score(svm, iris.data, iris.target, cv=kfold)\n print('CV-score is {}'.format(score.mean(0)))\n return best_para\n\n\npara = simple_grid(iris, kfold)\npara_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1, 10]\n }\ngrid_search = GridSearchCV(SVC(), para_grid, cv=kfold)\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,\n test_size=0.3, random_state=0)\ngrid_search.fit(X_train, y_train)\nprint('best grid score is {:.2f}'.format(grid_search.score(X_test, y_test)))\nimport pandas as pd\nresults = pd.DataFrame(grid_search.cv_results_)\ndisplay(results.head())\nprint(cross_val_score(GridSearchCV(SVC(), para_grid, cv=kfold), X_train,\n y_train, cv=kfold).mean())\ny_pred = grid_search.predict(X_test, 
y_test)\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, y_pred))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 22 18:05:44 2018\n\n@author: Administrator\n\"\"\"\n\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import GridSearchCV\n\niris = load_iris()\nlog_reg = LogisticRegression()\n\nscore = cross_val_score(log_reg, iris.data, iris.target,cv=10)\nprint(\"cross-vali score is: {}\".format(score.mean()))\n\nimport mglearn\n#mglearn.plots.plot_stratified_cross_validation()\n\nkfold = StratifiedKFold(n_splits=5, shuffle=True)\nfor train_index, test_index in kfold.split(iris.data, iris.target):\n print(train_index, test_index)\n \nfrom sklearn.svm import SVC\n\ndef simple_grid(iris, kfold):\n X_train,X_test, y_train, y_test = train_test_split(\n iris.data, iris.target, test_size=0.3,random_state = 0)\n best_score = 0\n para_list = [0.001, 0.01, 0.1, 1, 10]\n for gamma in para_list:\n for C in para_list:\n svm = SVC(gamma=gamma, C=C)\n #svm.fit(X_train, y_train)\n scores = cross_val_score(svm, iris.data, iris.target,cv=kfold)\n score = scores.mean()\n \n if score > best_score:\n best_score = score\n best_para = {'C':C, 'gamma':gamma}\n print(\"best score is {:.2f}\".format(best_score))\n print(\"best parameters is {}\".format(best_para))\n score = cross_val_score(svm, iris.data, iris.target,cv=kfold)\n \n print(\"CV-score is {}\".format(score.mean(0)))\n return best_para\n\npara = simple_grid(iris, kfold)\n\npara_grid = {\"C\":[0.001, 0.01, 0.1, 1, 10],\n 'gamma':[0.001, 0.01, 0.1, 1, 10]}\ngrid_search = GridSearchCV(SVC(), para_grid, cv = kfold)\nX_train,X_test, y_train, y_test = train_test_split(\n iris.data, iris.target, test_size=0.3,random_state = 0)\n\ngrid_search.fit(X_train, y_train)\nprint(\"best grid score is {:.2f}\".format(grid_search.score(X_test,\n y_test)))\n\nimport pandas as pd\nresults = 
pd.DataFrame(grid_search.cv_results_)\ndisplay(results.head())\n\nprint(cross_val_score(GridSearchCV(SVC(), para_grid, cv = kfold),\n X_train,y_train, cv = kfold).mean())\ny_pred = grid_search.predict(X_test,y_test)\n\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, y_pred))",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):
img = Image.open(file)
x, y = img.size
box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -
crop_bottom)
crop = img.crop(box)
crop.save(file)
def create_empty_folder(path):
"""Create a folder. Delete content if exists"""
Path(path).mkdir(parents=True, exist_ok=True)
existing_files = find_files_ignore_case(os.path.join(path, '*'))
for ef in existing_files:
os.remove(ef)
<|reserved_special_token_0|>
def get_file_name_prefix(filename):
with open('file_name_prefixes.txt') as f:
for line in f:
line = line.strip()
if filename.lower().startswith(line.lower()):
return line.strip()
return None
<|reserved_special_token_0|>
def handleRemoveReadonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_files_ignore_case(which, where='.'):
"""Returns list of filenames from `where` path matched by 'which'
shell pattern. Matching is case-insensitive."""
rule = re.compile(fnmatch.translate(which), re.IGNORECASE)
return [name for name in os.listdir(where) if rule.match(name)]
def crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):
img = Image.open(file)
x, y = img.size
box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -
crop_bottom)
crop = img.crop(box)
crop.save(file)
def create_empty_folder(path):
"""Create a folder. Delete content if exists"""
Path(path).mkdir(parents=True, exist_ok=True)
existing_files = find_files_ignore_case(os.path.join(path, '*'))
for ef in existing_files:
os.remove(ef)
def convert_pdf_to_images(file):
"""Convert a PDF file into images and save to folder of same name
Return folder which contains the images
"""
folder = os.path.splitext(file)[0]
create_empty_folder(folder)
images = convert_from_path(file)
for i, image in enumerate(images):
file_name = 'Z{:05}.jpg'.format(i + 1)
image.save(os.path.join(folder, file_name), 'JPEG')
return folder
def get_file_name_prefix(filename):
with open('file_name_prefixes.txt') as f:
for line in f:
line = line.strip()
if filename.lower().startswith(line.lower()):
return line.strip()
return None
<|reserved_special_token_0|>
def handleRemoveReadonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_files_ignore_case(which, where='.'):
"""Returns list of filenames from `where` path matched by 'which'
shell pattern. Matching is case-insensitive."""
rule = re.compile(fnmatch.translate(which), re.IGNORECASE)
return [name for name in os.listdir(where) if rule.match(name)]
def crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):
img = Image.open(file)
x, y = img.size
box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -
crop_bottom)
crop = img.crop(box)
crop.save(file)
def create_empty_folder(path):
"""Create a folder. Delete content if exists"""
Path(path).mkdir(parents=True, exist_ok=True)
existing_files = find_files_ignore_case(os.path.join(path, '*'))
for ef in existing_files:
os.remove(ef)
def convert_pdf_to_images(file):
"""Convert a PDF file into images and save to folder of same name
Return folder which contains the images
"""
folder = os.path.splitext(file)[0]
create_empty_folder(folder)
images = convert_from_path(file)
for i, image in enumerate(images):
file_name = 'Z{:05}.jpg'.format(i + 1)
image.save(os.path.join(folder, file_name), 'JPEG')
return folder
def get_file_name_prefix(filename):
with open('file_name_prefixes.txt') as f:
for line in f:
line = line.strip()
if filename.lower().startswith(line.lower()):
return line.strip()
return None
<|reserved_special_token_0|>
def handleRemoveReadonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
if __name__ == '__main__':
cur_folder = os.path.abspath('')
print('Convert PDFs to images...')
files = find_files_ignore_case('*.pdf')
for pdf_file in files:
pdf_file = os.path.join(cur_folder, pdf_file)
print(pdf_file)
folder = convert_pdf_to_images(pdf_file)
print('Crop images...')
files = find_files_ignore_case('*.pdf')
for file in files:
folder = os.path.splitext(file)[0]
print(folder)
images = find_files_ignore_case('*.jpg', folder)
images.sort()
print(images)
for image_file in images:
try:
image_file = os.path.join(folder, image_file)
crop_image_center(image_file, crop_left=160, crop_right=-40,
crop_top=100, crop_bottom=20)
except:
pass
files = find_files_ignore_case('*.pdf')
for file in files:
print(file)
folder = os.path.splitext(file)[0]
file_prefix = get_file_name_prefix(file)
print(file_prefix)
source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix),
'Reference')
for f in source_files:
f = os.path.join('Reference', f)
shutil.copy(f, folder)
files = find_files_ignore_case('*.pdf')
for file in files:
folder = os.path.splitext(file)[0]
word_file = folder + '.docx'
file_prefix = get_file_name_prefix(file)
files = find_files_ignore_case('{}*.docx'.format(file_prefix),
'Reference')
print(file, file_prefix, files)
if files:
document = Document(os.path.join('Reference', files[0]))
document.add_section()
else:
document = Document()
document.save(word_file)
section = document.sections[0]
height = (section.page_height - section.top_margin - section.
bottom_margin)
images = find_files_ignore_case('*.jpg', folder)
for image_file in images:
image_file = os.path.join(folder, image_file)
document.add_picture(image_file, height=height)
document.save(word_file)
files = find_files_ignore_case('*.pdf')
for file in files:
folder = os.path.splitext(file)[0]
print('Deleting', folder, os.path.isdir(folder))
try:
files_in_dir = os.listdir(folder)
for file in files_in_dir:
os.remove(os.path.join(folder, file))
shutil.rmtree(folder, ignore_errors=False, onerror=
handleRemoveReadonly)
except Exception as ex:
print('Error deleting', folder, ex)
<|reserved_special_token_1|>
from PIL import Image
from pdf2image import convert_from_path
import glob
from pathlib import Path
import shutil, os
from docx import Document
import fnmatch
import re
import shutil
def find_files_ignore_case(which, where='.'):
    """Returns list of filenames from `where` path matched by 'which'
    shell pattern. Matching is case-insensitive."""
    # Translate the shell glob to a regex once, then filter the listing.
    matcher = re.compile(fnmatch.translate(which), re.IGNORECASE).match
    return [entry for entry in os.listdir(where) if matcher(entry)]
def crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):
    """Crop margins off an image and overwrite the file in place.

    NOTE(review): the right/bottom box edges subtract *both* margins
    (x - crop_left - crop_right, y - crop_top - crop_bottom) instead of
    just crop_right / crop_bottom. The caller in this file passes
    crop_right=-40, which only makes sense against this formula, so it is
    documented as-is — confirm intent before "fixing".
    """
    img = Image.open(file)
    x, y = img.size  # (width, height) in pixels
    # PIL crop box is (left, upper, right, lower).
    box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -
        crop_bottom)
    crop = img.crop(box)
    crop.save(file)  # overwrite the original image
def create_empty_folder(path):
    """Create a folder. Delete any regular files already inside it.

    Bug fix: the old code passed os.path.join(path, '*') as the *pattern*
    to find_files_ignore_case while still listing the current directory,
    so files inside `path` were never matched — and os.remove was then
    called with a bare filename relative to the cwd. Iterate the target
    folder directly instead.
    """
    folder = Path(path)
    folder.mkdir(parents=True, exist_ok=True)
    for entry in folder.iterdir():
        # Only plain files are expected here (the folder holds page JPEGs).
        if entry.is_file():
            entry.unlink()
def convert_pdf_to_images(file):
    """Convert a PDF file into images and save to folder of same name
    Return folder which contains the images
    """
    folder = os.path.splitext(file)[0]
    create_empty_folder(folder)
    # One zero-padded JPEG per page: Z00001.jpg, Z00002.jpg, ...
    for page_number, page in enumerate(convert_from_path(file), start=1):
        page.save(os.path.join(folder, 'Z{:05}.jpg'.format(page_number)), 'JPEG')
    return folder
def get_file_name_prefix(filename):
    """Return the first prefix from file_name_prefixes.txt that
    `filename` starts with (case-insensitive), or None when no line
    matches.

    NOTE: a blank line in the file matches every filename, because
    str.startswith('') is always True — same as the original behaviour.
    """
    target = filename.lower()
    with open('file_name_prefixes.txt') as prefixes:
        for raw in prefixes:
            candidate = raw.strip()
            if target.startswith(candidate.lower()):
                return candidate
    return None
import errno, os, stat, shutil
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror handler: on a permission error raised by
    os.rmdir/os.remove, make `path` fully writable and retry the failed
    call once; re-raise anything else."""
    # exc is the sys.exc_info() triple; [1] is the exception instance.
    error = exc[1]
    retryable = func in (os.rmdir, os.remove)
    if retryable and error.errno == errno.EACCES:
        # Clear read-only bits (equivalent to chmod 0o777), then repeat
        # the operation that failed.
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        func(path)
    else:
        raise
if __name__ == '__main__':
    cur_folder = os.path.abspath('')
    # Phase 1: render every PDF in the current folder into a folder of
    # per-page JPEGs named after the PDF.
    print('Convert PDFs to images...')
    files = find_files_ignore_case('*.pdf')
    for pdf_file in files:
        pdf_file = os.path.join(cur_folder, pdf_file)
        print(pdf_file)
        folder = convert_pdf_to_images(pdf_file)
    # Phase 2: crop every rendered page in place.
    print('Crop images...')
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print(folder)
        images = find_files_ignore_case('*.jpg', folder)
        images.sort()
        print(images)
        for image_file in images:
            try:
                image_file = os.path.join(folder, image_file)
                crop_image_center(image_file, crop_left=160, crop_right=-40,
                    crop_top=100, crop_bottom=20)
            # NOTE(review): bare except silently skips any image that
            # fails to crop; consider at least printing the error.
            except:
                pass
    # Phase 3: copy reference JPEGs whose names share the PDF's prefix
    # into the PDF's image folder.
    files = find_files_ignore_case('*.pdf')
    for file in files:
        print(file)
        folder = os.path.splitext(file)[0]
        file_prefix = get_file_name_prefix(file)
        print(file_prefix)
        source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix),
            'Reference')
        for f in source_files:
            f = os.path.join('Reference', f)
            shutil.copy(f, folder)
    # Phase 4: build one .docx per PDF, starting from a matching template
    # in Reference/ when one exists, and append every JPEG scaled to the
    # printable page height.
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        word_file = folder + '.docx'
        file_prefix = get_file_name_prefix(file)
        # NOTE(review): rebinding `files` clobbers the outer loop's list
        # name; iteration still works only because the loop iterator was
        # created before the rebind.
        files = find_files_ignore_case('{}*.docx'.format(file_prefix),
            'Reference')
        print(file, file_prefix, files)
        if files:
            document = Document(os.path.join('Reference', files[0]))
            document.add_section()
        else:
            document = Document()
            document.save(word_file)
        section = document.sections[0]
        # Printable height = page height minus the vertical margins.
        height = (section.page_height - section.top_margin - section.
            bottom_margin)
        images = find_files_ignore_case('*.jpg', folder)
        for image_file in images:
            image_file = os.path.join(folder, image_file)
            document.add_picture(image_file, height=height)
        document.save(word_file)
    # Phase 5: delete the intermediate image folders.
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print('Deleting', folder, os.path.isdir(folder))
        try:
            files_in_dir = os.listdir(folder)
            # `file` is reused as the loop variable, shadowing the outer
            # loop's PDF name — harmless here but confusing.
            for file in files_in_dir:
                os.remove(os.path.join(folder, file))
            shutil.rmtree(folder, ignore_errors=False, onerror=
                handleRemoveReadonly)
        except Exception as ex:
            print('Error deleting', folder, ex)
<|reserved_special_token_1|>
from PIL import Image
from pdf2image import convert_from_path
import glob
from pathlib import Path
import shutil, os
from docx import Document
import fnmatch
import re
import shutil
def find_files_ignore_case(which, where='.'):
    '''Returns list of filenames from `where` path matched by 'which'
    shell pattern. Matching is case-insensitive.'''
    # TODO: recursive param with walk() filtering
    # fnmatch.translate turns the glob into a regex; IGNORECASE makes
    # e.g. '*.jpg' also match 'PAGE.JPG'.
    rule = re.compile(fnmatch.translate(which), re.IGNORECASE)
    return [name for name in os.listdir(where) if rule.match(name)]
def crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):
    """Crop the image at `file` in place by the given margins.

    NOTE(review): Image.crop expects coordinates (left, upper, right,
    lower); here the right/lower coordinates also subtract the left/top
    margins, so more than crop_right/crop_bottom is trimmed.  The caller
    seems to compensate with crop_right=-40 — confirm before changing.
    """
    img = Image.open(file)
    x, y = img.size
    box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top - crop_bottom)
    crop = img.crop(box)
    crop.save(file)
def create_empty_folder(path):
    '''Create a folder (and parents). Delete any plain files it already
    contains.'''
    folder = Path(path)
    folder.mkdir(parents=True, exist_ok=True)
    # Bug fix: find_files_ignore_case(os.path.join(path, '*')) listed
    # the current directory and matched names against 'path/*'; listdir
    # names never contain a separator, so nothing matched and the folder
    # was never emptied.  Iterate the target folder itself instead.
    for entry in folder.iterdir():
        # Only plain files are removed, matching the original intent.
        if entry.is_file():
            entry.unlink()
def convert_pdf_to_images(file):
    '''Convert a PDF file into images and save to folder of same name
    Return folder which contains the images
    '''
    # Create directory for each file
    folder = os.path.splitext(file)[0]
    create_empty_folder(folder)
    # Convert PDF to images into the directory
    images = convert_from_path(file)
    for i, image in enumerate(images):
        # 'Z' + zero-padded page number; presumably sorts the rendered
        # pages after the reference images copied in later — confirm.
        file_name = 'Z{:05}.jpg'.format(i+1)
        image.save(os.path.join(folder, file_name), 'JPEG')
    return folder
def get_file_name_prefix(filename):
    """Return the first prefix listed in file_name_prefixes.txt that
    `filename` starts with (case-insensitive), or None when no line
    matches.  NOTE: a blank line in the file matches every filename,
    since str.startswith('') is always True."""
    with open('file_name_prefixes.txt') as f:
        for line in f:
            line = line.strip()
            if filename.lower().startswith(line.lower()):
                # Already stripped above; this second .strip() is a no-op.
                return line.strip()
    return None
import errno, os, stat, shutil
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror handler: chmod a read-only entry writable
    and retry the failed os.rmdir/os.remove once; re-raise anything
    else."""
    # exc is the sys.exc_info() triple passed by shutil.rmtree; [1] is
    # the exception instance.
    excvalue = exc[1]
    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
        os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
        func(path)
    else:
        raise
if __name__ == '__main__':
    cur_folder = os.path.abspath('')
    # Convert PDFs to Images
    print('Convert PDFs to images...')
    files = find_files_ignore_case('*.pdf')
    for pdf_file in files:
        pdf_file = os.path.join(cur_folder, pdf_file)
        print(pdf_file)
        folder = convert_pdf_to_images(pdf_file)
    # Crop images
    print('Crop images...')
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print(folder)
        images = find_files_ignore_case('*.jpg', folder)
        images.sort()
        print(images)
        for image_file in images:
            try:
                image_file = os.path.join(folder, image_file)
                crop_image_center(image_file, crop_left=160,
                    crop_right=-40, crop_top=100, crop_bottom=20)
            # NOTE(review): bare except silently skips any image that
            # fails to crop; consider at least printing the error.
            except:
                pass
    # Copy Image *.jpg From Reference to Folder
    files = find_files_ignore_case('*.pdf')
    for file in files:
        print(file)
        folder = os.path.splitext(file)[0]
        file_prefix = get_file_name_prefix(file)
        print(file_prefix)
        # Copy Image *.jpg From Reference to Folder
        source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix), 'Reference')
        for f in source_files:
            f = os.path.join('Reference', f)
            shutil.copy(f, folder)
    # Insert Images to Word
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        word_file = folder+".docx"
        # Copy from template docx
        file_prefix = get_file_name_prefix(file)
        # NOTE(review): rebinding `files` clobbers the outer loop's list
        # name; iteration still works only because the loop iterator was
        # created before the rebind.
        files = find_files_ignore_case('{}*.docx'.format(file_prefix), 'Reference')
        print(file, file_prefix, files)
        if files:
            document = Document(os.path.join('Reference', files[0]))
            document.add_section()
        else:
            document = Document()
            document.save(word_file)
        section = document.sections[0]
        # width = section.page_width - section.left_margin - section.right_margin
        # Printable height = page height minus the vertical margins.
        height = section.page_height - section.top_margin - section.bottom_margin
        images = find_files_ignore_case('*.jpg', folder)
        for image_file in images:
            image_file = os.path.join(folder, image_file)
            # document.add_picture(image_file, width=width)
            document.add_picture(image_file, height=height)
        document.save(word_file)
    # Delete folders including its images
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print('Deleting', folder, os.path.isdir(folder))
        try:
            files_in_dir = os.listdir(folder)
            for file in files_in_dir: # loop to delete each file in folder
                os.remove(os.path.join(folder,file))
            #os.rmdir(folder)
            shutil.rmtree(folder, ignore_errors=False, onerror=handleRemoveReadonly)
        except Exception as ex:
            print('Error deleting', folder, ex)
|
flexible
|
{
"blob_id": "a9876c61578a53f29865062c0915db622aaaba72",
"index": 6916,
"step-1": "<mask token>\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\n<mask token>\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\n<mask token>\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_files_ignore_case(which, where='.'):\n \"\"\"Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.\"\"\"\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\ndef convert_pdf_to_images(file):\n \"\"\"Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n \"\"\"\n folder = os.path.splitext(file)[0]\n create_empty_folder(folder)\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i + 1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n return folder\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\n<mask token>\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_files_ignore_case(which, where='.'):\n \"\"\"Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.\"\"\"\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\ndef convert_pdf_to_images(file):\n \"\"\"Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n \"\"\"\n folder = os.path.splitext(file)[0]\n create_empty_folder(folder)\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i + 1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n return folder\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\n<mask token>\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\nif __name__ == '__main__':\n cur_folder = os.path.abspath('')\n print('Convert PDFs to images...')\n files = find_files_ignore_case('*.pdf')\n for pdf_file in files:\n pdf_file = os.path.join(cur_folder, pdf_file)\n print(pdf_file)\n folder = convert_pdf_to_images(pdf_file)\n print('Crop 
images...')\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print(folder)\n images = find_files_ignore_case('*.jpg', folder)\n images.sort()\n print(images)\n for image_file in images:\n try:\n image_file = os.path.join(folder, image_file)\n crop_image_center(image_file, crop_left=160, crop_right=-40,\n crop_top=100, crop_bottom=20)\n except:\n pass\n files = find_files_ignore_case('*.pdf')\n for file in files:\n print(file)\n folder = os.path.splitext(file)[0]\n file_prefix = get_file_name_prefix(file)\n print(file_prefix)\n source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix),\n 'Reference')\n for f in source_files:\n f = os.path.join('Reference', f)\n shutil.copy(f, folder)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n word_file = folder + '.docx'\n file_prefix = get_file_name_prefix(file)\n files = find_files_ignore_case('{}*.docx'.format(file_prefix),\n 'Reference')\n print(file, file_prefix, files)\n if files:\n document = Document(os.path.join('Reference', files[0]))\n document.add_section()\n else:\n document = Document()\n document.save(word_file)\n section = document.sections[0]\n height = (section.page_height - section.top_margin - section.\n bottom_margin)\n images = find_files_ignore_case('*.jpg', folder)\n for image_file in images:\n image_file = os.path.join(folder, image_file)\n document.add_picture(image_file, height=height)\n document.save(word_file)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print('Deleting', folder, os.path.isdir(folder))\n try:\n files_in_dir = os.listdir(folder)\n for file in files_in_dir:\n os.remove(os.path.join(folder, file))\n shutil.rmtree(folder, ignore_errors=False, onerror=\n handleRemoveReadonly)\n except Exception as ex:\n print('Error deleting', folder, ex)\n",
"step-4": "from PIL import Image\nfrom pdf2image import convert_from_path\nimport glob\nfrom pathlib import Path\nimport shutil, os\nfrom docx import Document\nimport fnmatch\nimport re\nimport shutil\n\n\ndef find_files_ignore_case(which, where='.'):\n \"\"\"Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.\"\"\"\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\ndef convert_pdf_to_images(file):\n \"\"\"Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n \"\"\"\n folder = os.path.splitext(file)[0]\n create_empty_folder(folder)\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i + 1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n return folder\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\nimport errno, os, stat, shutil\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\nif __name__ == '__main__':\n cur_folder = os.path.abspath('')\n print('Convert PDFs to images...')\n 
files = find_files_ignore_case('*.pdf')\n for pdf_file in files:\n pdf_file = os.path.join(cur_folder, pdf_file)\n print(pdf_file)\n folder = convert_pdf_to_images(pdf_file)\n print('Crop images...')\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print(folder)\n images = find_files_ignore_case('*.jpg', folder)\n images.sort()\n print(images)\n for image_file in images:\n try:\n image_file = os.path.join(folder, image_file)\n crop_image_center(image_file, crop_left=160, crop_right=-40,\n crop_top=100, crop_bottom=20)\n except:\n pass\n files = find_files_ignore_case('*.pdf')\n for file in files:\n print(file)\n folder = os.path.splitext(file)[0]\n file_prefix = get_file_name_prefix(file)\n print(file_prefix)\n source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix),\n 'Reference')\n for f in source_files:\n f = os.path.join('Reference', f)\n shutil.copy(f, folder)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n word_file = folder + '.docx'\n file_prefix = get_file_name_prefix(file)\n files = find_files_ignore_case('{}*.docx'.format(file_prefix),\n 'Reference')\n print(file, file_prefix, files)\n if files:\n document = Document(os.path.join('Reference', files[0]))\n document.add_section()\n else:\n document = Document()\n document.save(word_file)\n section = document.sections[0]\n height = (section.page_height - section.top_margin - section.\n bottom_margin)\n images = find_files_ignore_case('*.jpg', folder)\n for image_file in images:\n image_file = os.path.join(folder, image_file)\n document.add_picture(image_file, height=height)\n document.save(word_file)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print('Deleting', folder, os.path.isdir(folder))\n try:\n files_in_dir = os.listdir(folder)\n for file in files_in_dir:\n os.remove(os.path.join(folder, file))\n shutil.rmtree(folder, 
ignore_errors=False, onerror=\n handleRemoveReadonly)\n except Exception as ex:\n print('Error deleting', folder, ex)\n",
"step-5": "from PIL import Image\nfrom pdf2image import convert_from_path\nimport glob \nfrom pathlib import Path\nimport shutil, os\nfrom docx import Document\nimport fnmatch\nimport re\nimport shutil\n\n\ndef find_files_ignore_case(which, where='.'):\n '''Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.'''\n \n # TODO: recursive param with walk() filtering\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top - crop_bottom)\n crop = img.crop(box) \n crop.save(file)\n\ndef create_empty_folder(path):\n '''Create a folder. Delete content if exists'''\n Path(path).mkdir(parents=True, exist_ok=True)\n \n # Remove existing files\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\ndef convert_pdf_to_images(file):\n '''Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n '''\n # Create directory for each file\n folder = os.path.splitext(file)[0] \n create_empty_folder(folder)\n \n # Convert PDF to images into the directory\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i+1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n\n return folder\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\nimport errno, os, stat, shutil\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 
0777\n func(path)\n else:\n raise\n \n\nif __name__ == '__main__':\n cur_folder = os.path.abspath('')\n \n # Convert PDFs to Images\n print('Convert PDFs to images...')\n files = find_files_ignore_case('*.pdf')\n for pdf_file in files:\n pdf_file = os.path.join(cur_folder, pdf_file)\n print(pdf_file)\n folder = convert_pdf_to_images(pdf_file)\n \n # Crop images\n print('Crop images...')\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print(folder)\n images = find_files_ignore_case('*.jpg', folder)\n images.sort()\n print(images)\n for image_file in images:\n try:\n image_file = os.path.join(folder, image_file)\n crop_image_center(image_file, crop_left=160, \n crop_right=-40, crop_top=100, crop_bottom=20)\n except:\n pass\n \n # Copy Image *.jpg From Reference to Folder\n files = find_files_ignore_case('*.pdf')\n for file in files:\n print(file)\n folder = os.path.splitext(file)[0]\n file_prefix = get_file_name_prefix(file)\n print(file_prefix)\n\n # Copy Image *.jpg From Reference to Folder\n source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix), 'Reference')\n\n for f in source_files:\n f = os.path.join('Reference', f)\n shutil.copy(f, folder)\n \n # Insert Images to Word\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0] \n word_file = folder+\".docx\"\n\n # Copy from template docx\n file_prefix = get_file_name_prefix(file)\n files = find_files_ignore_case('{}*.docx'.format(file_prefix), 'Reference')\n print(file, file_prefix, files)\n if files:\n document = Document(os.path.join('Reference', files[0]))\n document.add_section()\n else:\n document = Document()\n document.save(word_file)\n\n section = document.sections[0]\n # width = section.page_width - section.left_margin - section.right_margin\n height = section.page_height - section.top_margin - section.bottom_margin\n\n images = find_files_ignore_case('*.jpg', folder)\n for image_file in 
images:\n image_file = os.path.join(folder, image_file)\n # document.add_picture(image_file, width=width)\n document.add_picture(image_file, height=height)\n\n document.save(word_file)\n \n # Delete folders including its images\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print('Deleting', folder, os.path.isdir(folder))\n try:\n files_in_dir = os.listdir(folder) \n for file in files_in_dir: # loop to delete each file in folder\n os.remove(os.path.join(folder,file))\n #os.rmdir(folder)\n shutil.rmtree(folder, ignore_errors=False, onerror=handleRemoveReadonly)\n except Exception as ex:\n print('Error deleting', folder, ex)\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Todo(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Todo(models.Model):
    """A single to-do item."""
    # Short task description.
    title = models.CharField(max_length=200)
    # Completion flag; presumably 0 = pending, 1 = done — confirm why an
    # IntegerField is used rather than a BooleanField.
    completed = models.IntegerField(default=0)
<|reserved_special_token_1|>
from django.db import models
class Todo(models.Model):
    """A single to-do item."""
    # Short task description.
    title = models.CharField(max_length=200)
    # Completion flag; presumably 0 = pending, 1 = done — confirm why an
    # IntegerField is used rather than a BooleanField.
    completed = models.IntegerField(default=0)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Todo(models.Model):
	"""A single to-do item."""
	# Short task description.
	title = models.CharField(max_length=200)
	# Completion flag; presumably 0 = pending, 1 = done — confirm why an
	# IntegerField is used rather than a BooleanField.
	completed = models.IntegerField(default=0)
|
flexible
|
{
"blob_id": "4b075d8211d7047f6f08fe6f6f55e4703bdb6f1f",
"index": 3164,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Todo(models.Model):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n completed = models.IntegerField(default=0)\n",
"step-4": "from django.db import models\n\n\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n completed = models.IntegerField(default=0)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Todo(models.Model):\n\ttitle = models.CharField(max_length=200)\n\tcompleted = models.IntegerField(default=0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python3
"""
module that has
fucntions that
shows attributes
"""
def lookup(obj):
    """Return the list of attribute and method names of *obj*."""
    names = dir(obj)
    return names
|
normal
|
{
"blob_id": "67380fb8b1557b0ed6779009e5f9ae93fd81aedd",
"index": 8753,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lookup(obj):\n \"\"\"\n function that returns attributes and methods of an object\n \"\"\"\n return dir(obj)\n",
"step-3": "#!/usr/bin/python3\n\"\"\"\nmodule that has\nfucntions that\nshows attributes\n\"\"\"\n\n\ndef lookup(obj):\n \"\"\"\n function that returns attributes and methods of an object\n \"\"\"\n return(dir(obj))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def wrong_subtraction(n, k):
    """Apply k steps of the 'wrong subtraction' (Codeforces 977A):
    drop the last digit when it is 0, otherwise subtract 1."""
    value = n
    for _ in range(k):
        digits = str(value)
        if digits.endswith('0'):
            # Dropping the trailing zero is integer division by 10.
            value = int(digits[:-1])
        else:
            value -= 1
    return value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def wrong_subtraction(n, k):
output = n
for i in range(k):
string_n = str(output)
if string_n[len(string_n) - 1] == '0':
output = int(string_n[:-1])
else:
output -= 1
return output
<|reserved_special_token_0|>
print(wrong_subtraction(n, k))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def wrong_subtraction(n, k):
output = n
for i in range(k):
string_n = str(output)
if string_n[len(string_n) - 1] == '0':
output = int(string_n[:-1])
else:
output -= 1
return output
a = list(map(int, input().split()))
n = a[0]
k = a[1]
print(wrong_subtraction(n, k))
<|reserved_special_token_1|>
import argparse
def wrong_subtraction(n, k):
output = n
for i in range(k):
string_n = str(output)
if string_n[len(string_n) - 1] == '0':
output = int(string_n[:-1])
else:
output -= 1
return output
a = list(map(int, input().split()))
n = a[0]
k = a[1]
print(wrong_subtraction(n, k))
<|reserved_special_token_1|>
import argparse
def wrong_subtraction(n, k):
    """Perform k steps of the 'wrong subtraction' (Codeforces 977A):
    if the last digit is 0 drop it, otherwise subtract 1.

    Improvements over the string-based original:
    - pure integer arithmetic instead of a str/int round trip per step;
    - no ValueError when the value reaches 0 (the old code did
      int('') after stripping the digit of '0').

    Args:
        n: starting non-negative integer.
        k: number of subtraction steps to apply.

    Returns:
        The value after k steps.
    """
    value = n
    for _ in range(k):
        if value % 10 == 0:
            value //= 10
        else:
            value -= 1
    return value
# Command-line (argparse) version, kept for reference; stdin is used
# instead below.
# d = "Do the wrong subtraction as per https://codeforces.com/problemset/problem/977/A"
#
# parser = argparse.ArgumentParser(description=d)
#
# parser.add_argument("n", type=int, help="input value for n")
# parser.add_argument("k", type=int, help="input value for k")
#
# args = parser.parse_args()
#
# n = args.n
# k = args.k
# Read a single stdin line of the form "n k", e.g. "512 4".
a = list(map(int, input().split()))
n = a[0]
k = a[1]
print(wrong_subtraction(n, k))
|
flexible
|
{
"blob_id": "166a8cd0e09fbec739f43019659eeaf98b1d4fa4",
"index": 4446,
"step-1": "<mask token>\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\n<mask token>\nprint(wrong_subtraction(n, k))\n",
"step-3": "<mask token>\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\na = list(map(int, input().split()))\nn = a[0]\nk = a[1]\nprint(wrong_subtraction(n, k))\n",
"step-4": "import argparse\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\na = list(map(int, input().split()))\nn = a[0]\nk = a[1]\nprint(wrong_subtraction(n, k))\n",
"step-5": "import argparse\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n# d = \"Do the wrong subtraction as per https://codeforces.com/problemset/problem/977/A\"\n# \n# parser = argparse.ArgumentParser(description=d)\n# \n# parser.add_argument(\"n\", type=int, help=\"input value for n\")\n# parser.add_argument(\"k\", type=int, help=\"input value for k\")\n# \n# args = parser.parse_args()\n# \n# n = args.n\n# k = args.k\n\na = list(map(int, input().split()))\nn = a[0]\nk = a[1]\n\nprint(wrong_subtraction(n, k))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Simulador de sistema M/M/1.
#
# Variables de respuesta:
# - Demora promedio por cliente
# - Número promedio de clientes en cola
# - Utilización promedio de cliente
#
# Funciones:
# arribo()
# partida()
# nuevoEvento()
# medidasDesempeño()
# generarTiempoExponencial(t)
# generarHisotgrama(lista)
import numpy as np
import random
import math
import matplotlib.pyplot as plt
def arribo():
    """Arrival-event routine for the M/M/1 simulation: schedule the next
    arrival, then either seize the idle server or join the queue,
    updating the global statistics accumulators."""
    global reloj
    global tiempoUltEvento
    global estadoServ
    global tiempoServicioTotal
    global areaQ
    global numCliEnCola
    global cola
    global tiempoLibre
    global completaronDemora
    # Schedule the next arrival.
    listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)
    if estadoServ == 0:
        # Server becomes busy; the arriving customer starts service now.
        estadoServ = 1
        tiempoLibre += reloj - tiempoUltEvento
        # Schedule this customer's departure.
        listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
        # A customer with zero queueing delay still counts as having
        # completed its delay.
        completaronDemora += 1
    else:
        # Accumulate server busy time since the last event.
        tiempoServicioTotal += (reloj - tiempoUltEvento)
        # Record utilisation-so-far for the convergence plot.
        listaUsoServidores.append(tiempoServicioTotal / reloj)
        # Area under Q(t) for the elapsed interval, then grow the queue.
        areaQ += (numCliEnCola * (reloj - tiempoUltEvento))
        numCliEnCola += 1
        # Remember the arrival instant to compute this customer's delay.
        cola.append(reloj)
    print(cola)
def partida():
    """Departure-event routine: start service for the next queued
    customer, or idle the server when the queue is empty."""
    global numCliEnCola
    global tiempoServicioTotal
    global areaQ
    global demoraAcumulada
    global completaronDemora
    global estadoServ
    global listaUsoServidores
    if numCliEnCola > 0:
        # Schedule the departure of the customer entering service.
        listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
        # Delay = now minus the instant the customer joined the queue.
        # NOTE(review): this reads the *most recent* arrival (LIFO) and
        # cola.pop() below removes it; a FIFO M/M/1 queue would use
        # cola[0] / cola.pop(0) — confirm the intended discipline.
        demoraAcumulada += reloj - cola[len(cola)-1]
        completaronDemora += 1
        # Accumulate server busy time since the last event.
        tiempoServicioTotal += (reloj - tiempoUltEvento)
        # Record utilisation-so-far for the convergence plot.
        listaUsoServidores.append(tiempoServicioTotal/reloj)
        # Area under Q(t) for the interval since the previous event.
        areaQ += (numCliEnCola * (reloj - tiempoUltEvento))
        numCliEnCola -= 1
        # Remove the customer that just entered service (see NOTE above).
        cola.pop()
        print(cola)
    else:
        # Queue empty: server goes idle; no departure can be scheduled,
        # so push the departure event to "infinity".
        estadoServ = 0
        tiempoServicioTotal += (reloj - tiempoUltEvento)
        # Utilisation sample intentionally not recorded here? — confirm:
        # listaUsoServidores.append(tiempoServicioTotal / reloj)
        listaEventos[1] = 9999999.0
def generarHisotgrama(lista, tiempototal, reloj):
    """Plot the running server utilisation (`lista`) with a dotted
    horizontal reference line at the overall average
    tiempototal / reloj.  (The 'Hisotgrama' misspelling is kept because
    callers use this name.)"""
    utilizacionProm=tiempototal/reloj
    plt.title('Utilizacion promedio del servidor')
    plt.plot(lista)
    plt.xlabel("tiempo")
    plt.ylabel("Utilizacion promedio")
    plt.axhline(utilizacionProm, color='k', ls="dotted", xmax=1) # constant horizontal reference line
    plt.ylim(0, 1) # Y-axis limits (utilisation is a fraction)
    plt.xlim(0, len(lista)) # X-axis limits
    plt.show()
def medidasDesempeño():
    """Print the simulation's performance measures (idle time, average
    queue length, average utilisation, average delay per customer) and
    show the utilisation convergence plot."""
    global listaUsoServidores
    global reloj
    global tiempoLibre
    global areaQ
    global tiempoServicioTotal
    global demoraAcumulada
    global completaronDemora
    print("Medidas de desempeño de la simulación: ")
    print("TIEMPO LIBRE DEL SERVIDOR %s" % tiempoLibre)
    print("PORCENTAJE DE TIEMPO LIBRE %s" % (tiempoLibre / reloj))
    print()
    print("El reloj quedo en ", reloj)
    # Time-average number in queue: area under Q(t) over total time.
    var1 = areaQ / reloj
    print("Nro promedio de cli en cola:", var1)
    # Fraction of simulated time the server was busy.
    var2 = tiempoServicioTotal / reloj
    print("Utilización promedio de los servidores:", var2)
    # Average delay over customers that completed their wait.
    var3 = demoraAcumulada / completaronDemora
    print("Demora promedio por cliente:", var3)
    generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)
def generarTiempoExponencial(media):
    """Draw an exponential variate by inverse-transform sampling.

    NOTE(review): `media` is effectively used as a *rate* here — the value
    returned by -(1/media)*ln(U) has mean 1/media.  The previously present
    numpy alternative, np.random.exponential(media), would instead treat
    `media` as the mean.  With tiempoEntreArribos=7 and tiempoDeServicio=9
    only the rate interpretation keeps the queue stable (rho = 7/9 < 1), so
    the current form is kept; confirm the intended meaning of `media`.
    """
    u = random.random()
    return -(1 / media) * math.log(u)
def nuevoEvento():
    """Advance the clock to the most imminent event and record its type.

    listaEventos[0] holds the next arrival time and listaEventos[1] the next
    departure time; ties are resolved in favor of the arrival.
    """
    global reloj
    global proximoEvento
    global listaEventos

    proximoArribo, proximaPartida = listaEventos[0], listaEventos[1]
    if proximoArribo <= proximaPartida:
        reloj = proximoArribo
        proximoEvento = "ARRIBO"
    else:
        reloj = proximaPartida
        proximoEvento = "PARTIDA"
# --- Main program ---

# Model parameters (see generarTiempoExponencial for how they are consumed).
tiempoEntreArribos = 7
tiempoDeServicio = 9

# Simulation state shared with the event routines via `global`.
reloj = 0.0
tiempoUltEvento = 0.0
estadoServ = 0
numCliEnCola = 0
cola = []
proximoEvento = ""

# Statistical accumulators.
tiempoServicioTotal = 0.0
tiempoLibre = 0.0
demoraAcumulada = 0.0
areaQ = 0.0
completaronDemora = 0
listaUsoServidores = []

# Event list: [next arrival, next departure].  The departure slot starts at
# "infinity" because the system is initially empty.
listaEventos = [generarTiempoExponencial(tiempoEntreArribos), 9999999.0]

# Event loop: advance the clock, dispatch the event routine, and stop once
# the clock reaches 1000 time units.
while True:
    nuevoEvento()
    if proximoEvento == "ARRIBO":
        arribo()
    else:
        partida()
    tiempoUltEvento = reloj
    if reloj >= 1000:
        break

medidasDesempeño()
|
normal
|
{
"blob_id": "62cc731982846f08b3f3caace5df1bfafd421869",
"index": 1701,
"step-1": "<mask token>\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % 
tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % 
tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\n<mask token>\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\nlistaEventos.append(9999999.0)\nwhile True:\n nuevoEvento()\n if proximoEvento == 'ARRIBO':\n arribo()\n else:\n partida()\n tiempoUltEvento = reloj\n if reloj >= 1000:\n break\nmedidasDesempeño()\n",
"step-3": "<mask token>\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % 
tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\ntiempoEntreArribos = 7\ntiempoDeServicio = 9\nreloj = 0.0\nestadoServ = 0\ntiempoServicioTotal = 0.0\ntiempoLibre = 0.0\ndemoraAcumulada = 0.0\nproximoEvento = ''\nlistaEventos = []\ncola = []\nnumCliEnCola = 0\nareaQ = 0.0\ntiempoUltEvento = 0.0\ncompletaronDemora = 0\nlistaUsoServidores = []\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\nlistaEventos.append(9999999.0)\nwhile True:\n nuevoEvento()\n if proximoEvento == 'ARRIBO':\n arribo()\n else:\n partida()\n tiempoUltEvento = reloj\n if reloj >= 1000:\n break\nmedidasDesempeño()\n",
"step-4": "import numpy as np\nimport random\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la 
simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\ntiempoEntreArribos = 7\ntiempoDeServicio = 9\nreloj = 0.0\nestadoServ = 0\ntiempoServicioTotal = 0.0\ntiempoLibre = 0.0\ndemoraAcumulada = 0.0\nproximoEvento = ''\nlistaEventos = []\ncola = []\nnumCliEnCola = 0\nareaQ = 0.0\ntiempoUltEvento = 0.0\ncompletaronDemora = 0\nlistaUsoServidores = []\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\nlistaEventos.append(9999999.0)\nwhile True:\n nuevoEvento()\n if proximoEvento == 'ARRIBO':\n arribo()\n else:\n partida()\n tiempoUltEvento = reloj\n if reloj >= 1000:\n break\nmedidasDesempeño()\n",
"step-5": "# Simulador de sistema M/M/1.\r\n#\r\n# Variables de respuesta:\r\n# - Demora promedio por cliente\r\n# - Número promedio de clientes en cola\r\n# - Utilización promedio de cliente\r\n#\r\n# Funciones:\r\n# arribo()\r\n# partida()\r\n# nuevoEvento()\r\n# medidasDesempeño()\r\n# generarTiempoExponencial(t)\r\n# generarHisotgrama(lista)\r\n\r\nimport numpy as np\r\nimport random\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\ndef arribo():\r\n\r\n global reloj\r\n global tiempoUltEvento\r\n global estadoServ\r\n global tiempoServicioTotal\r\n global areaQ\r\n global numCliEnCola\r\n global cola\r\n global tiempoLibre\r\n global completaronDemora\r\n\r\n # Siguiente arribo\r\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\r\n\r\n\r\n if estadoServ == 0:\r\n # Servidor pasa a 1, ocupado\r\n estadoServ = 1\r\n tiempoLibre += reloj - tiempoUltEvento\r\n # Programo el próximo evento partida\r\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\r\n\r\n # Actualizo la cantidad de clientes que completaron la demora\r\n completaronDemora += 1\r\n\r\n else:\r\n # Acumulo el tiempo de servicio\r\n tiempoServicioTotal += (reloj - tiempoUltEvento)\r\n\r\n # Acumulo la utilizacion de cada servidor en t para hacer la grafica de como se llega al promedio de utilización.\r\n listaUsoServidores.append(tiempoServicioTotal / reloj)\r\n\r\n areaQ += (numCliEnCola * (reloj - tiempoUltEvento))\r\n numCliEnCola += 1\r\n\r\n # Agrego el cliente a la cola\r\n cola.append(reloj)\r\n print(cola)\r\n\r\ndef partida():\r\n\r\n global numCliEnCola\r\n global tiempoServicioTotal\r\n global areaQ\r\n global demoraAcumulada\r\n global completaronDemora\r\n global estadoServ\r\n global listaUsoServidores\r\n\r\n if numCliEnCola > 0:\r\n\r\n\r\n\r\n # Proxima partida\r\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\r\n # Acumulo la demora acumulada como el valor actual del reloj\r\n # menos el valor del reloj cuando 
el cliente ingresó a la cola\r\n\r\n demoraAcumulada += reloj - cola[len(cola)-1]\r\n\r\n # Actualizo el contador de clientes que completaron la demora\r\n completaronDemora += 1\r\n\r\n # Acumulo el tiempo de servicio\r\n tiempoServicioTotal += (reloj - tiempoUltEvento)\r\n\r\n # Acumulo la utilizacion de cada servidor en t para hacer la grafica de como se llega al promedio de utilización.\r\n listaUsoServidores.append(tiempoServicioTotal/reloj)\r\n\r\n # Calculo el Área bajo Q(t) del período anterior (Reloj - TiempoUltimoEvento)\r\n areaQ += (numCliEnCola * (reloj - tiempoUltEvento))\r\n numCliEnCola -= 1\r\n\r\n # Saco el ultimo en llegar\r\n cola.pop()\r\n print(cola)\r\n else:\r\n # Al no haber clientes en cola, establezco el estado del servidor en \"Desocupado\"\r\n estadoServ = 0\r\n # Acumulo el tiempo de servicio\r\n tiempoServicioTotal += (reloj - tiempoUltEvento)\r\n\r\n # Acumulo la utilizacion de cada servidor en t para hacer la grafica de como se llega al promedio de utilización.\r\n # listaUsoServidores.append(tiempoServicioTotal / reloj)\r\n listaEventos[1] = 9999999.0\r\n\r\n\r\ndef generarHisotgrama(lista, tiempototal, reloj):\r\n utilizacionProm=tiempototal/reloj\r\n plt.title('Utilizacion promedio del servidor')\r\n plt.plot(lista)\r\n plt.xlabel(\"tiempo\")\r\n plt.ylabel(\"Utilizacion promedio\")\r\n plt.axhline(utilizacionProm, color='k', ls=\"dotted\", xmax=1) # Comando para linea horizontal constante\r\n plt.ylim(0, 1) # Limites para el eje Y\r\n plt.xlim(0, len(lista)) # Limites para el eje X\r\n plt.show()\r\n\r\n\r\ndef medidasDesempeño():\r\n global listaUsoServidores\r\n global reloj\r\n global tiempoLibre\r\n global areaQ\r\n global tiempoServicioTotal\r\n global demoraAcumulada\r\n global completaronDemora\r\n\r\n print(\"Medidas de desempeño de la simulación: \")\r\n print(\"TIEMPO LIBRE DEL SERVIDOR %s\" % tiempoLibre)\r\n print(\"PORCENTAJE DE TIEMPO LIBRE %s\" % (tiempoLibre / reloj))\r\n print()\r\n print(\"El reloj quedo en \", 
reloj)\r\n\r\n var1 = areaQ / reloj\r\n print(\"Nro promedio de cli en cola:\", var1)\r\n\r\n var2 = tiempoServicioTotal / reloj\r\n print(\"Utilización promedio de los servidores:\", var2)\r\n\r\n var3 = demoraAcumulada / completaronDemora\r\n print(\"Demora promedio por cliente:\", var3)\r\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\r\n\r\ndef generarTiempoExponencial(media):\r\n # return np.random.exponential(media)\r\n return -(1/media) * math.log(random.random())\r\n\r\n\r\ndef nuevoEvento():\r\n\r\n global reloj\r\n global proximoEvento\r\n global listaEventos\r\n\r\n if listaEventos[0] <= listaEventos[1]:\r\n reloj = listaEventos[0]\r\n proximoEvento = \"ARRIBO\"\r\n else:\r\n reloj = listaEventos[1]\r\n proximoEvento = \"PARTIDA\"\r\n\r\n#Inicio del programa principal\r\n#Tiempo de arribo y servicio del modelo:\r\ntiempoEntreArribos = 7\r\ntiempoDeServicio = 9\r\n\r\n#Inicializacion de variables\r\nreloj = 0.0\r\nestadoServ = 0\r\ntiempoServicioTotal = 0.0\r\ntiempoLibre = 0.0\r\ndemoraAcumulada = 0.0\r\nproximoEvento = \"\"\r\nlistaEventos = []\r\ncola = []\r\nnumCliEnCola = 0\r\nareaQ = 0.0\r\ntiempoUltEvento = 0.0\r\ncompletaronDemora = 0\r\nlistaUsoServidores = []\r\n\r\n# Tiempo primer evento (arribo)\r\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\r\n#\r\n# Infinito, ya que todavia no hay clientes en el sistema\r\nlistaEventos.append(9999999.0)\r\n\r\nwhile True:\r\n nuevoEvento()\r\n\r\n # Llamada a la rutina correspondiente en función del tipo de evento\r\n if proximoEvento == \"ARRIBO\":\r\n arribo()\r\n else:\r\n partida()\r\n\r\n tiempoUltEvento = reloj\r\n\r\n if reloj >= 1000:\r\n break\r\nmedidasDesempeño()\r\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.app import App
import webbrowser
# Module-level placeholders.
# NOTE(review): none of these names (a, b, n, k, g) are read anywhere in this
# file — the callback parameter `a` inside `ghetto` shadows the global — so
# they appear to be dead code; confirm before removing.
a=0.0
b="?"
n=0.0
k=""
g=""
class ghetto(GridLayout):
    """Two-column grid of subject buttons; each button opens that lesson's Zoom link."""

    def matCallback(self, a):
        webbrowser.open_new("https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09")

    def biyoCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09")

    def edebCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09")

    def kimyaCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09")

    def tarihCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs")

    def cogCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09")

    def bilisiCallback(self, a):
        webbrowser.open_new("https://us02web.zoom.us/j/3469922894")

    def muzCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09")

    def ingCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09")

    def felCallback(self, a):
        webbrowser.open_new("https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09")

    def __init__(self, **kwargs):
        super(ghetto, self).__init__(**kwargs)
        self.cols = 2
        # Create the buttons in the original creation order; each keeps its
        # historical attribute name (btn1..btn10) on the instance.
        especificaciones = [
            ('btn1', 'MATEMATİK', self.matCallback),
            ('btn2', 'KİMYA', self.kimyaCallback),
            ('btn3', 'BİYOLOJİ', self.biyoCallback),
            ('btn4', 'FELSEFE', self.felCallback),
            ('btn6', 'EDEBİYAT', self.edebCallback),
            ('btn7', 'BİLİŞİM', self.bilisiCallback),
            ('btn5', 'TARİH', self.tarihCallback),
            ('btn8', 'MÜZİK', self.muzCallback),
            ('btn9', 'İNGİLİZCE', self.ingCallback),
            ('btn10', 'COĞRAFYA', self.cogCallback),
        ]
        for attr, label, handler in especificaciones:
            boton = Button(text=label)
            boton.bind(on_press=handler)
            setattr(self, attr, boton)
        # Grid placement order: geography first, then the remaining subjects.
        for attr in ('btn10', 'btn1', 'btn2', 'btn3', 'btn4',
                     'btn5', 'btn6', 'btn7', 'btn8', 'btn9'):
            self.add_widget(getattr(self, attr))
class main(App):
    """Kivy application whose root widget is the subject-button grid."""

    def build(self):
        root = ghetto()
        return root
if __name__ == "__main__":
    # Start the Kivy event loop only when executed as a script.
    main().run()
|
normal
|
{
"blob_id": "39affe139eec4cf6877646188839d79ed575235c",
"index": 8952,
"step-1": "<mask token>\n\n\nclass ghetto(GridLayout):\n <mask token>\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n <mask token>\n <mask token>\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n self.add_widget(self.btn1)\n 
self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ghetto(GridLayout):\n\n def matCallback(self, a):\n webbrowser.open_new(\n 'https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09'\n )\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n <mask token>\n <mask token>\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n 
self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n self.add_widget(self.btn1)\n self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ghetto(GridLayout):\n\n def matCallback(self, a):\n webbrowser.open_new(\n 'https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09'\n )\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n\n def tarihCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs'\n )\n\n def cogCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09'\n )\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n 
self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n self.add_widget(self.btn1)\n self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\nif __name__ == '__main__':\n main().run()\n",
"step-4": "from kivy.uix.button import Button\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.label import Label\nfrom kivy.app import App\nimport webbrowser\na = 0.0\nb = '?'\nn = 0.0\nk = ''\ng = ''\n\n\nclass ghetto(GridLayout):\n\n def matCallback(self, a):\n webbrowser.open_new(\n 'https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09'\n )\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n\n def tarihCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs'\n )\n\n def cogCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09'\n )\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n 
self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n self.add_widget(self.btn1)\n self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\nif __name__ == '__main__':\n main().run()\n",
"step-5": "from kivy.uix.button import Button\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.floatlayout import FloatLayout\r\nfrom kivy.uix.label import Label\r\nfrom kivy.app import App\r\nimport webbrowser\r\na=0.0\r\nb=\"?\"\r\nn=0.0\r\nk=\"\"\r\ng=\"\"\r\nclass ghetto(GridLayout):\r\n def matCallback(self,a):\r\n webbrowser.open_new(\"https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09\")\r\n def biyoCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09\")\r\n def edebCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09\")\r\n def kimyaCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09\")\r\n def tarihCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs\")\r\n def cogCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09\")\r\n def bilisiCallback(self,a):\r\n webbrowser.open_new(\"https://us02web.zoom.us/j/3469922894\")\r\n def muzCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09\")\r\n def ingCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09\")\r\n def felCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09\")\r\n\r\n \r\n \r\n def __init__(self,**kwargs):\r\n super(ghetto, self).__init__(**kwargs)\r\n self.cols = 2\r\n self.btn1 = Button(text='MATEMATİK')\r\n self.btn1.bind(on_press=self.matCallback)\r\n self.btn2 = Button(text='KİMYA')\r\n self.btn2.bind(on_press=self.kimyaCallback)\r\n self.btn3 = Button(text='BİYOLOJİ')\r\n self.btn3.bind(on_press=self.biyoCallback)\r\n self.btn4 = 
Button(text='FELSEFE')\r\n self.btn4.bind(on_press=self.felCallback)\r\n self.btn6 = Button(text='EDEBİYAT')\r\n self.btn6.bind(on_press=self.edebCallback)\r\n self.btn7 = Button(text='BİLİŞİM')\r\n self.btn7.bind(on_press=self.bilisiCallback)\r\n self.btn5 = Button(text='TARİH')\r\n self.btn5.bind(on_press=self.tarihCallback)\r\n self.btn8 = Button(text='MÜZİK')\r\n self.btn8.bind(on_press=self.muzCallback)\r\n self.btn9 = Button(text='İNGİLİZCE')\r\n self.btn9.bind(on_press=self.ingCallback)\r\n self.btn10 = Button(text='COĞRAFYA')\r\n self.btn10.bind(on_press=self.cogCallback)\r\n self.add_widget(self.btn10)\r\n self.add_widget(self.btn1)\r\n self.add_widget(self.btn2)\r\n self.add_widget(self.btn3)\r\n self.add_widget(self.btn4)\r\n self.add_widget(self.btn5)\r\n self.add_widget(self.btn6)\r\n self.add_widget(self.btn7)\r\n self.add_widget(self.btn8)\r\n self.add_widget(self.btn9)\r\n \r\n \r\n\r\nclass main(App):\r\n def build(self):\r\n return ghetto()\r\n\r\nif __name__ == \"__main__\":\r\n main().run()\r\n",
"step-ids": [
11,
12,
15,
17,
18
]
}
|
[
11,
12,
15,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(n):
x, x1 = [int(i) for i in input().strip().split(' ')]
x, x1 = x - 1, x1 - 1
t[i] = [x, x1]
<|reserved_special_token_0|>
while len(res) < n:
a = res[-1]
b = t[a][0]
c = t[a][1]
if c not in t[b]:
b, c = c, b
res += [b, c]
print(' '.join(str(i + 1) for i in res))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n = int(input().strip())
t = [None] * n
for i in range(n):
x, x1 = [int(i) for i in input().strip().split(' ')]
x, x1 = x - 1, x1 - 1
t[i] = [x, x1]
res = [0]
while len(res) < n:
a = res[-1]
b = t[a][0]
c = t[a][1]
if c not in t[b]:
b, c = c, b
res += [b, c]
print(' '.join(str(i + 1) for i in res))
<|reserved_special_token_1|>
#!python3
"""

I1. a

Ex1
5
1 3 5
2 1 4
3 2 4
4 1 5
5 2 3



"""
# NOTE(review): the example above shows three numbers per input line, but the
# unpacking below expects exactly two values per line — confirm the real input
# format (a leading node id may need to be stripped first).
n = int(input().strip())
t = [None] * n  # t[i] holds the two (0-based) neighbour ids read for node i
for i in range(n):
    x,x1 = [int(i) for i in input().strip().split(' ')]
    x,x1 = x-1, x1-1  # convert 1-based node ids to 0-based indices
    t[i] = [x, x1]
res = [0]  # traversal order, seeded with node 0
while len(res) < n:
    a = res[-1]
    b = t[a][0]
    c = t[a][1]
    # Swap so that c is listed among b's neighbours; presumably this keeps the
    # appended pair consistent with the adjacency lists — TODO confirm intent.
    if c not in t[b]:
        b, c = c, b
    res += [b, c]
print(' '.join(str(i+1) for i in res))
|
flexible
|
{
"blob_id": "0e3c6e14ff184401a3f30a6198306a17686e6ebe",
"index": 2382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n x, x1 = [int(i) for i in input().strip().split(' ')]\n x, x1 = x - 1, x1 - 1\n t[i] = [x, x1]\n<mask token>\nwhile len(res) < n:\n a = res[-1]\n b = t[a][0]\n c = t[a][1]\n if c not in t[b]:\n b, c = c, b\n res += [b, c]\nprint(' '.join(str(i + 1) for i in res))\n",
"step-3": "<mask token>\nn = int(input().strip())\nt = [None] * n\nfor i in range(n):\n x, x1 = [int(i) for i in input().strip().split(' ')]\n x, x1 = x - 1, x1 - 1\n t[i] = [x, x1]\nres = [0]\nwhile len(res) < n:\n a = res[-1]\n b = t[a][0]\n c = t[a][1]\n if c not in t[b]:\n b, c = c, b\n res += [b, c]\nprint(' '.join(str(i + 1) for i in res))\n",
"step-4": "#!python3\r\n\"\"\"\r\n\r\nI1. a\r\n\r\nEx1\r\n5\r\n1 3 5\r\n2 1 4\r\n3 2 4\r\n4 1 5\r\n5 2 3\r\n\r\n\r\n\r\n\"\"\"\r\n\r\nn = int(input().strip())\r\nt = [None] * n\r\nfor i in range(n):\r\n x,x1 = [int(i) for i in input().strip().split(' ')]\r\n x,x1 = x-1, x1-1\r\n t[i] = [x, x1]\r\n\r\nres = [0]\r\nwhile len(res) < n:\r\n a = res[-1]\r\n b = t[a][0]\r\n c = t[a][1]\r\n if c not in t[b]:\r\n b, c = c, b\r\n res += [b, c]\r\n\r\n\r\nprint(' '.join(str(i+1) for i in res))\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while index < len(array):
count = 0
while count <= len(array) - 2:
if count == len(array) - 1:
break
if array[count] > array[count + 1]:
sift = array[count]
array[count] = array[count + 1]
array[count + 1] = sift
count = count + 1
index = index + 1
print(array)
<|reserved_special_token_1|>
array = [1, 7, 3, 8, 9, 2, 4]
index = 0
while index < len(array):
count = 0
while count <= len(array) - 2:
if count == len(array) - 1:
break
if array[count] > array[count + 1]:
sift = array[count]
array[count] = array[count + 1]
array[count + 1] = sift
count = count + 1
index = index + 1
print(array)
<|reserved_special_token_1|>
# Bubble-sort a small list of integers in place, then print the result.
array = [1, 7, 3, 8, 9, 2, 4]
# The original inner loop contained an unreachable `break` (the loop condition
# `count <= len-2` already excludes len-1); it is dropped here. Each outer pass
# bubbles the largest remaining value to the end, so the inner scan shrinks.
for done in range(len(array) - 1):
    for i in range(len(array) - 1 - done):
        if array[i] > array[i + 1]:
            # Idiomatic tuple swap instead of a temporary `sift` variable.
            array[i], array[i + 1] = array[i + 1], array[i]
print(array)
|
flexible
|
{
"blob_id": "fc8976141a19afd099f92cbbdb578e9c620cb745",
"index": 5075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile index < len(array):\n count = 0\n while count <= len(array) - 2:\n if count == len(array) - 1:\n break\n if array[count] > array[count + 1]:\n sift = array[count]\n array[count] = array[count + 1]\n array[count + 1] = sift\n count = count + 1\n index = index + 1\nprint(array)\n",
"step-3": "array = [1, 7, 3, 8, 9, 2, 4]\nindex = 0\nwhile index < len(array):\n count = 0\n while count <= len(array) - 2:\n if count == len(array) - 1:\n break\n if array[count] > array[count + 1]:\n sift = array[count]\n array[count] = array[count + 1]\n array[count + 1] = sift\n count = count + 1\n index = index + 1\nprint(array)\n",
"step-4": "array = [1, 7, 3, 8, 9, 2, 4]\nindex = 0\nwhile (index < len(array)):\n count = 0\n while(count <= len(array)-2):\n if(count == len(array)-1):\n break\n if (array[count] > array[count+1]):\n sift = array[count]\n array[count] = array[count+1]\n array[count+1] = sift\n count = count + 1\n index = index + 1\nprint (array)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random
import matplotlib.pyplot as plt
import numpy as np
def dado(n):
    """Simulate n throws of two six-sided dice and plot the sum distribution.

    Prints how many times each possible sum (2..12) occurred, then shows a
    bar chart of the raw counts.

    The original kept eleven separate counters (dos..doce) that exactly
    mirrored the `cont` list, plus dead `dividendo`/`frecuencia` computations
    whose results were never used; all of that is collapsed here without
    changing the printed counts or the chart.
    """
    # cont[j] counts occurrences of the sum (j + 2).
    cont = [0] * 11
    for _ in range(n):
        suma = random.randint(1, 6) + random.randint(1, 6)
        cont[suma - 2] += 1
    nombres = ['dos', 'tres', 'cuatro', 'cinco', 'seis', 'siete', 'ocho',
               'nueve', 'diez', 'once', 'doce']
    # Report each count. This also fixes the original's typo for sum 12
    # ("la suma de doce repitio" was missing "se").
    for nombre, veces in zip(nombres, cont):
        print('la suma de', nombre, 'se repitio', veces)
    print('fin')
    etiqueta = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.xticks(range(len(cont)), etiqueta)
    plt.xlabel('Sumas')
    plt.ylabel('Probabilidad')
    plt.title('Simulación suma Dados')
    plt.bar(range(len(cont)), cont)
    plt.show()
# Run the simulation at increasing sample sizes to show convergence toward
# the theoretical distribution of two-dice sums.
dado(100)
dado(1000)
dado(10000)
|
normal
|
{
"blob_id": "2d0d73c0ea20d6736c10d5201abcfa9d561ef216",
"index": 7474,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef dado(n):\n i = 1\n dos = 0\n tres = 0\n cuatro = 0\n cinco = 0\n seis = 0\n siete = 0\n ocho = 0\n nueve = 0\n diez = 0\n once = 0\n doce = 0\n cont = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n while i <= n:\n r1 = random.randint(1, 6)\n r2 = random.randint(1, 6)\n suma = r1 + r2\n for j in range(2, 13):\n if suma == j:\n cont[j - 2] = cont[j - 2] + 1\n i += 1\n dividendo = 1\n if suma == 2:\n dividendo = 1\n dos += 1\n elif suma == 3:\n dividendo = 2\n tres += 1\n elif suma == 4:\n dividendo = 3\n cuatro += 1\n elif suma == 5:\n dividendo = 4\n cinco += 1\n elif suma == 6:\n dividendo = 5\n seis += 1\n elif suma == 7:\n dividendo = 6\n siete += 1\n elif suma == 8:\n dividendo = 5\n ocho += 1\n elif suma == 9:\n dividendo = 4\n nueve += 1\n elif suma == 10:\n dividendo = 3\n diez += 1\n elif suma == 11:\n dividendo = 2\n once += 1\n elif suma == 12:\n dividendo = 1\n doce += 1\n frecuencia = dividendo / 36\n print('la suma de dos se repitio', dos)\n print('la suma de tres se repitio', tres)\n print('la suma de cuatro se repitio', cuatro)\n print('la suma de cinco se repitio', cinco)\n print('la suma de seis se repitio', seis)\n print('la suma de siete se repitio', siete)\n print('la suma de ocho se repitio', ocho)\n print('la suma de nueve se repitio', nueve)\n print('la suma de diez se repitio', diez)\n print('la suma de once se repitio', once)\n print('la suma de doce repitio', doce)\n print('fin')\n etiqueta = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']\n plt.xticks(range(len(cont)), etiqueta)\n plt.xlabel('Sumas')\n plt.ylabel('Probabilidad')\n plt.title('Simulación suma Dados')\n plt.bar(range(len(cont)), cont)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef dado(n):\n i = 1\n dos = 0\n tres = 0\n cuatro = 0\n cinco = 0\n seis = 0\n siete = 0\n ocho = 0\n nueve = 0\n diez = 0\n once = 0\n doce = 0\n cont = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n while i <= n:\n r1 = random.randint(1, 6)\n r2 = random.randint(1, 6)\n suma = r1 + r2\n for j in range(2, 13):\n if suma == j:\n cont[j - 2] = cont[j - 2] + 1\n i += 1\n dividendo = 1\n if suma == 2:\n dividendo = 1\n dos += 1\n elif suma == 3:\n dividendo = 2\n tres += 1\n elif suma == 4:\n dividendo = 3\n cuatro += 1\n elif suma == 5:\n dividendo = 4\n cinco += 1\n elif suma == 6:\n dividendo = 5\n seis += 1\n elif suma == 7:\n dividendo = 6\n siete += 1\n elif suma == 8:\n dividendo = 5\n ocho += 1\n elif suma == 9:\n dividendo = 4\n nueve += 1\n elif suma == 10:\n dividendo = 3\n diez += 1\n elif suma == 11:\n dividendo = 2\n once += 1\n elif suma == 12:\n dividendo = 1\n doce += 1\n frecuencia = dividendo / 36\n print('la suma de dos se repitio', dos)\n print('la suma de tres se repitio', tres)\n print('la suma de cuatro se repitio', cuatro)\n print('la suma de cinco se repitio', cinco)\n print('la suma de seis se repitio', seis)\n print('la suma de siete se repitio', siete)\n print('la suma de ocho se repitio', ocho)\n print('la suma de nueve se repitio', nueve)\n print('la suma de diez se repitio', diez)\n print('la suma de once se repitio', once)\n print('la suma de doce repitio', doce)\n print('fin')\n etiqueta = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']\n plt.xticks(range(len(cont)), etiqueta)\n plt.xlabel('Sumas')\n plt.ylabel('Probabilidad')\n plt.title('Simulación suma Dados')\n plt.bar(range(len(cont)), cont)\n plt.show()\n\n\ndado(100)\ndado(1000)\ndado(10000)\n",
"step-4": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef dado(n):\n i = 1\n dos = 0\n tres = 0\n cuatro = 0\n cinco = 0\n seis = 0\n siete = 0\n ocho = 0\n nueve = 0\n diez = 0\n once = 0\n doce = 0\n cont = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n while i <= n:\n r1 = random.randint(1, 6)\n r2 = random.randint(1, 6)\n suma = r1 + r2\n for j in range(2, 13):\n if suma == j:\n cont[j - 2] = cont[j - 2] + 1\n i += 1\n dividendo = 1\n if suma == 2:\n dividendo = 1\n dos += 1\n elif suma == 3:\n dividendo = 2\n tres += 1\n elif suma == 4:\n dividendo = 3\n cuatro += 1\n elif suma == 5:\n dividendo = 4\n cinco += 1\n elif suma == 6:\n dividendo = 5\n seis += 1\n elif suma == 7:\n dividendo = 6\n siete += 1\n elif suma == 8:\n dividendo = 5\n ocho += 1\n elif suma == 9:\n dividendo = 4\n nueve += 1\n elif suma == 10:\n dividendo = 3\n diez += 1\n elif suma == 11:\n dividendo = 2\n once += 1\n elif suma == 12:\n dividendo = 1\n doce += 1\n frecuencia = dividendo / 36\n print('la suma de dos se repitio', dos)\n print('la suma de tres se repitio', tres)\n print('la suma de cuatro se repitio', cuatro)\n print('la suma de cinco se repitio', cinco)\n print('la suma de seis se repitio', seis)\n print('la suma de siete se repitio', siete)\n print('la suma de ocho se repitio', ocho)\n print('la suma de nueve se repitio', nueve)\n print('la suma de diez se repitio', diez)\n print('la suma de once se repitio', once)\n print('la suma de doce repitio', doce)\n print('fin')\n etiqueta = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']\n plt.xticks(range(len(cont)), etiqueta)\n plt.xlabel('Sumas')\n plt.ylabel('Probabilidad')\n plt.title('Simulación suma Dados')\n plt.bar(range(len(cont)), cont)\n plt.show()\n\n\ndado(100)\ndado(1000)\ndado(10000)\n",
"step-5": "import random\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\ndef dado(n):\r\n i = 1\r\n dos =0\r\n tres =0\r\n cuatro =0\r\n cinco=0\r\n seis =0\r\n siete=0\r\n ocho=0\r\n nueve=0\r\n diez=0\r\n once=0\r\n doce=0\r\n cont = [0,0,0,0,0,0,0,0,0,0,0]\r\n while i <= n:\r\n r1 = random.randint(1,6)\r\n r2 = random.randint(1,6)\r\n #print(\"Resultado del primer dado es: \",r1)\r\n #print(\"Resultado del segundo dado es: \",r2)\r\n suma = r1 +r2\r\n # print(\"Sumatoria de dados ------->\",suma)\r\n for j in range(2, 13):\r\n if suma == j:\r\n cont[j-2] = cont[j-2] + 1\r\n i += 1\r\n dividendo =1\r\n if suma == 2:\r\n dividendo=1\r\n dos +=1\r\n elif suma ==3:\r\n dividendo=2\r\n tres +=1\r\n elif suma ==4:\r\n dividendo=3\r\n cuatro +=1\r\n elif suma ==5:\r\n dividendo=4\r\n cinco +=1\r\n elif suma ==6:\r\n dividendo=5\r\n seis +=1\r\n elif suma ==7:\r\n dividendo=6\r\n siete +=1\r\n elif suma ==8:\r\n dividendo=5\r\n ocho +=1\r\n elif suma ==9:\r\n dividendo=4\r\n nueve +=1\r\n elif suma ==10:\r\n dividendo=3\r\n diez +=1\r\n elif suma ==11:\r\n dividendo=2\r\n once +=1\r\n elif suma ==12:\r\n dividendo=1\r\n doce +=1\r\n frecuencia = dividendo/36\r\n # print(\"La frecuencia es : \", frecuencia)\r\n \r\n print(\"la suma de dos se repitio\",dos)\r\n print(\"la suma de tres se repitio\",tres)\r\n print(\"la suma de cuatro se repitio\",cuatro)\r\n print(\"la suma de cinco se repitio\",cinco)\r\n print(\"la suma de seis se repitio\",seis)\r\n print(\"la suma de siete se repitio\",siete)\r\n print(\"la suma de ocho se repitio\",ocho)\r\n print(\"la suma de nueve se repitio\",nueve)\r\n print(\"la suma de diez se repitio\",diez)\r\n print(\"la suma de once se repitio\",once)\r\n print(\"la suma de doce repitio\",doce)\r\n print(\"fin\")\r\n etiqueta = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']\r\n plt.xticks(range(len(cont)), etiqueta)\r\n plt.xlabel('Sumas')\r\n plt.ylabel('Probabilidad')\r\n plt.title('Simulación suma Dados')\r\n 
plt.bar(range(len(cont)), cont) \r\n plt.show()\r\n \r\n \r\ndado(100)\r\ndado(1000)\r\ndado(10000)\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',
load_feature_groups=['rhymes', 'statistical', 'statistical_time',
'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=
genre_target_labels())
pipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(
epochs=50))])
evaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,
64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.
grid_parameters_genres())
result_handlers = [result_handlers.print_gridsearch_results]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from dbispipeline.evaluators import GridEvaluator
import dbispipeline.result_handlers as result_handlers
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from nlp4musa2020.dataloaders.alf200k import ALF200KLoader
from nlp4musa2020.dataloaders.alf200k import genre_target_labels
from nlp4musa2020.dataloaders.vectorizer import lda
from nlp4musa2020.dataloaders.vectorizer import tfidf
import nlp4musa2020.evaluators as evaluators
from nlp4musa2020.models.simplenn_genre import SimpleGenreNN
dataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',
load_feature_groups=['rhymes', 'statistical', 'statistical_time',
'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=
genre_target_labels())
pipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(
epochs=50))])
evaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,
64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.
grid_parameters_genres())
result_handlers = [result_handlers.print_gridsearch_results]
<|reserved_special_token_1|>
"""Config for a simple neural-network genre classifier on ALF200K lyrics."""
from dbispipeline.evaluators import GridEvaluator
import dbispipeline.result_handlers as result_handlers
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from nlp4musa2020.dataloaders.alf200k import ALF200KLoader
from nlp4musa2020.dataloaders.alf200k import genre_target_labels
from nlp4musa2020.dataloaders.vectorizer import lda
from nlp4musa2020.dataloaders.vectorizer import tfidf
import nlp4musa2020.evaluators as evaluators
from nlp4musa2020.models.simplenn_genre import SimpleGenreNN
# Lyrics feature groups plus LDA and tf-idf text vectors; the prediction
# targets are the last.fm genre labels.
dataloader = ALF200KLoader(
    path='data/processed/dataset-lfm-genres.pickle',
    load_feature_groups=[
        'rhymes',
        'statistical',
        'statistical_time',
        'explicitness',
        'audio',
    ],
    text_vectorizers=lda() + tfidf(),
    target=genre_target_labels(),
)
# Standardize features before feeding the dense network.
pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('model', SimpleGenreNN(epochs=50)),
])
# Grid search over the hidden-layer sizes and dropout rate of the model.
evaluator = GridEvaluator(
    parameters={
        'model__dense_sizes': [
            (32, 32),
            (64, 64),
        ],
        'model__dropout_rate': [0.1],
    },
    grid_parameters=evaluators.grid_parameters_genres(),
)
# Print grid-search results to stdout when the pipeline finishes.
result_handlers = [
    result_handlers.print_gridsearch_results,
]
|
flexible
|
{
"blob_id": "473c653da54ebdb7fe8a9eefc166cab167f43357",
"index": 3994,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=['rhymes', 'statistical', 'statistical_time',\n 'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=\n genre_target_labels())\npipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(\n epochs=50))])\nevaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,\n 64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.\n grid_parameters_genres())\nresult_handlers = [result_handlers.print_gridsearch_results]\n",
"step-3": "<mask token>\nfrom dbispipeline.evaluators import GridEvaluator\nimport dbispipeline.result_handlers as result_handlers\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom nlp4musa2020.dataloaders.alf200k import ALF200KLoader\nfrom nlp4musa2020.dataloaders.alf200k import genre_target_labels\nfrom nlp4musa2020.dataloaders.vectorizer import lda\nfrom nlp4musa2020.dataloaders.vectorizer import tfidf\nimport nlp4musa2020.evaluators as evaluators\nfrom nlp4musa2020.models.simplenn_genre import SimpleGenreNN\ndataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=['rhymes', 'statistical', 'statistical_time',\n 'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=\n genre_target_labels())\npipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(\n epochs=50))])\nevaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,\n 64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.\n grid_parameters_genres())\nresult_handlers = [result_handlers.print_gridsearch_results]\n",
"step-4": "\"\"\"Config for a linear regression model evaluated on a diabetes dataset.\"\"\"\nfrom dbispipeline.evaluators import GridEvaluator\nimport dbispipeline.result_handlers as result_handlers\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom nlp4musa2020.dataloaders.alf200k import ALF200KLoader\nfrom nlp4musa2020.dataloaders.alf200k import genre_target_labels\nfrom nlp4musa2020.dataloaders.vectorizer import lda\nfrom nlp4musa2020.dataloaders.vectorizer import tfidf\nimport nlp4musa2020.evaluators as evaluators\nfrom nlp4musa2020.models.simplenn_genre import SimpleGenreNN\n\ndataloader = ALF200KLoader(\n path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=[\n 'rhymes',\n 'statistical',\n 'statistical_time',\n 'explicitness',\n 'audio',\n ],\n text_vectorizers=lda() + tfidf(),\n target=genre_target_labels(),\n)\n\npipeline = Pipeline([\n ('scaler', StandardScaler()),\n ('model', SimpleGenreNN(epochs=50)),\n])\n\nevaluator = GridEvaluator(\n parameters={\n 'model__dense_sizes': [\n (32, 32),\n (64, 64),\n ],\n 'model__dropout_rate': [0.1],\n },\n grid_parameters=evaluators.grid_parameters_genres(),\n)\n\nresult_handlers = [\n result_handlers.print_gridsearch_results,\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding: utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import time
from urllib import urlencode
from urlparse import parse_qs, urlparse, urlunparse
from flask import current_app as app
from flask import url_for
from jose import jwt
from oauth2client.client import flow_from_clientsecrets
from pathlib2 import Path
from .models import Customer
def create_oauth_flow():
    """Prepare Google OAuth workflow from config file.

    Stores the constructed flow on the Flask app as ``app.flow``.
    """
    secrets_path = Path(app.config['ROOT_DIR'], 'configs/client_secrets.json')
    callback_url = url_for('auth.oauth2callback', _external=True)
    app.flow = flow_from_clientsecrets(
        str(secrets_path),
        scope=['email', 'profile'],
        redirect_uri=callback_url,
    )
def create_jwt(user, name=None, renewable=False):
    """Create a JWT for *user*, scoped to their first customer."""
    session_user = sessionize_user(user, name)
    # The active customer is the first one listed on the user document.
    primary_customer = Customer.get_by_name(user.customers[0])
    session_customer = sessionize_customer(primary_customer)
    return format_jwt(session_user, session_customer, renewable)
def sessionize_user(user, name):
    """Flatten a user document into a session dict.

    Copies the Elasticsearch ``_source`` fields and tags on the document
    id plus the Google display name.
    """
    doc = user.to_dict(include_meta=True)
    session = dict(doc['_source'])
    session['_id'] = doc['_id']
    session['google_name'] = name
    return session
def sessionize_customer(customer):
    """Flatten a customer document into a session dict with its ``_id``."""
    doc = customer.to_dict(include_meta=True)
    session = dict(doc['_source'])
    session['_id'] = doc['_id']
    return session
def format_jwt(user, active_customer, renewable):
    """Format a JWT and MAC it."""
    issued_at = int(time.time())
    config = app.config
    claims = {
        # reserved claims: https://tools.ietf.org/html/rfc7519#section-4.1
        'exp': issued_at + config['AUTH_TOKEN_LIFETIME'],
        'nbf': issued_at,  # not valid before now
        'iss': config['AUTH_TOKEN_ISSUER'],
        'iat': issued_at,  # issue date
        # private claims: https://tools.ietf.org/html/rfc7519#section-4.3
        'user': user,
        'active_customer': active_customer,
        'renewable': renewable,
    }
    return jwt.encode(
        claims,
        key=config['AUTH_JWT_SECRET'],
        algorithm=config['AUTH_JWT_ALGORITHM'],
    )
def set_params(url, params):
    """Set GET parameters on a URL, merging with any existing query string."""
    parts = urlparse(url)
    merged = parse_qs(parts.query)
    merged.update(params)
    new_query = urlencode(merged, doseq=True)
    return urlunparse(parts._replace(query=new_query))
|
normal
|
{
"blob_id": "fe73a80b15cad025a33930ddd9abb31524cd0244",
"index": 9404,
"step-1": "<mask token>\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\n<mask token>\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n query = parse_qs(components.query)\n query.update(params)\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-3": "<mask token>\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\ndef format_jwt(user, active_customer, renewable):\n \"\"\"Format a JWT and MAC it.\"\"\"\n now = int(time.time())\n claims = {'exp': now + app.config['AUTH_TOKEN_LIFETIME'], 'nbf': now,\n 'iss': app.config['AUTH_TOKEN_ISSUER'], 'iat': now, 'user': user,\n 'active_customer': active_customer, 'renewable': renewable}\n return jwt.encode(claims, key=app.config['AUTH_JWT_SECRET'], algorithm=\n app.config['AUTH_JWT_ALGORITHM'])\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n query = parse_qs(components.query)\n query.update(params)\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport time\nfrom urllib import urlencode\nfrom urlparse import parse_qs, urlparse, urlunparse\nfrom flask import current_app as app\nfrom flask import url_for\nfrom jose import jwt\nfrom oauth2client.client import flow_from_clientsecrets\nfrom pathlib2 import Path\nfrom .models import Customer\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\ndef format_jwt(user, active_customer, renewable):\n \"\"\"Format a JWT and MAC it.\"\"\"\n now = int(time.time())\n claims = {'exp': now + app.config['AUTH_TOKEN_LIFETIME'], 'nbf': now,\n 'iss': app.config['AUTH_TOKEN_ISSUER'], 'iat': now, 'user': user,\n 'active_customer': active_customer, 'renewable': renewable}\n return jwt.encode(claims, key=app.config['AUTH_JWT_SECRET'], algorithm=\n app.config['AUTH_JWT_ALGORITHM'])\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n query = parse_qs(components.query)\n 
query.update(params)\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-5": "# coding: utf-8\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport time\nfrom urllib import urlencode\nfrom urlparse import parse_qs, urlparse, urlunparse\n\nfrom flask import current_app as app\nfrom flask import url_for\nfrom jose import jwt\nfrom oauth2client.client import flow_from_clientsecrets\nfrom pathlib2 import Path\n\nfrom .models import Customer\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(\n str(Path(app.config['ROOT_DIR'], 'configs/client_secrets.json')),\n scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True),\n )\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(\n Customer.get_by_name(user.customers[0])\n )\n\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n\n return sessionized\n\n\ndef format_jwt(user, active_customer, renewable):\n \"\"\"Format a JWT and MAC it.\"\"\"\n now = int(time.time())\n\n claims = {\n # reserved: https://tools.ietf.org/html/rfc7519#section-4.1\n 'exp': now + app.config['AUTH_TOKEN_LIFETIME'],\n 'nbf': now, # not before\n 'iss': app.config['AUTH_TOKEN_ISSUER'],\n 'iat': now, # issue date\n # private: https://tools.ietf.org/html/rfc7519#section-4.3\n 'user': user,\n 'active_customer': active_customer,\n 'renewable': renewable,\n }\n\n return jwt.encode(\n claims,\n 
key=app.config['AUTH_JWT_SECRET'],\n algorithm=app.config['AUTH_JWT_ALGORITHM'],\n )\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n\n query = parse_qs(components.query)\n query.update(params)\n\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from unittest import TestCase
from unittest.mock import patch, mock_open, call
from network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException
from network_simulator.service import NetworkSimulatorService
from network_simulator.service.network_simulator_service import Device
class TestNetworkSimulatorService(TestCase):
    """Tests for device registration handling in NetworkSimulatorService."""

    @patch("network_simulator.service.network_topology_handler.write_network_topology_to_file")
    def setUp(self, write_network_topology_to_file_mock):
        """Build a service in a test namespace plus reusable device data."""
        self.device_id = "testid"
        self.device_type = "vm"
        self.tap_if_name = "testtap"
        self.device_data_dict = {
            "device_id": self.device_id,
            "device_type": self.device_type,
            "tap_if_name": self.tap_if_name,
            "xpos": 5.0,
            "ypos": 3.0,
        }
        self.test_net_namespace = "testns"
        self.network_svc = NetworkSimulatorService(self.test_net_namespace)

    def test_deviceStrRepresentation(self):
        """str(device) renders as 'id,type,tap_if_name'."""
        dev = Device(self.device_data_dict)
        expected = "{},{},{}".format(self.device_id, self.device_type, self.tap_if_name)
        self.assertEqual(expected, str(dev))

    def test_registerDeviceTwice(self):
        """Registering an already-known device id raises."""
        self.network_svc.devices[self.device_id] = ""
        with self.assertRaises(DeviceAlreadyRegisteredException):
            self.network_svc.register_new_device(self.device_data_dict)

    def test_registerNewDevice(self):
        """A fresh registration lands in the service's device map."""
        self.network_svc.register_new_device(self.device_data_dict)
        self.assertIn(self.device_id, self.network_svc.devices.keys())

    def test_deregisterDevice(self):
        """Deregistering removes the id from the device map."""
        self.network_svc.devices[self.device_id] = ""
        self.network_svc.deregister_device(self.device_id)
        self.assertNotIn(self.device_id, self.network_svc.devices.keys())

    def test_deregisterInvalidDevice(self):
        """Deregistering an unknown id raises."""
        with self.assertRaises(UnknownDeviceException):
            self.network_svc.deregister_device(self.device_id)

    # Helper: build a Device from the shared data dict, then override
    # its id and position.
    def create_device(self, device_id):
        dev = Device(self.device_data_dict)
        dev.device_id = device_id
        dev.xpos = 4.0
        dev.ypos = 3.0
        return dev
|
normal
|
{
"blob_id": "8e854398084e89b0b8436d6b0a2bf8f36a9c7bd5",
"index": 187,
"step-1": "<mask token>\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n <mask token>\n <mask token>\n <mask token>\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return device\n",
"step-2": "<mask token>\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = ''\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n <mask token>\n <mask token>\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return device\n",
"step-3": "<mask token>\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = ''\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n <mask token>\n\n def test_deregisterDevice(self):\n self.network_svc.devices[self.device_id] = ''\n self.network_svc.deregister_device(self.device_id)\n self.assertNotIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return device\n",
"step-4": "from unittest import TestCase\nfrom unittest.mock import patch, mock_open, call\nfrom network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException\nfrom network_simulator.service import NetworkSimulatorService\nfrom network_simulator.service.network_simulator_service import Device\n\n\nclass TestNetworkSimulatorService(TestCase):\n\n @patch(\n 'network_simulator.service.network_topology_handler.write_network_topology_to_file'\n )\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = 'testid'\n self.device_type = 'vm'\n self.tap_if_name = 'testtap'\n self.device_data_dict = {'device_id': self.device_id, 'device_type':\n self.device_type, 'tap_if_name': self.tap_if_name, 'xpos': 5.0,\n 'ypos': 3.0}\n self.test_net_namespace = 'testns'\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = '{},{},{}'.format(self.device_id, self.device_type, self.\n tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = ''\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n\n def test_registerNewDevice(self):\n self.network_svc.register_new_device(self.device_data_dict)\n self.assertIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterDevice(self):\n self.network_svc.devices[self.device_id] = ''\n self.network_svc.deregister_device(self.device_id)\n self.assertNotIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 4.0\n device.ypos = 3.0\n return 
device\n",
"step-5": "from unittest import TestCase\nfrom unittest.mock import patch, mock_open, call\n\nfrom network_simulator.exceptions.device_exceptions import DeviceAlreadyRegisteredException, UnknownDeviceException\nfrom network_simulator.service import NetworkSimulatorService\nfrom network_simulator.service.network_simulator_service import Device\n\n\nclass TestNetworkSimulatorService(TestCase):\n @patch(\"network_simulator.service.network_topology_handler.write_network_topology_to_file\")\n def setUp(self, write_network_topology_to_file_mock):\n self.device_id = \"testid\"\n self.device_type = \"vm\"\n self.tap_if_name = \"testtap\"\n self.device_data_dict = {\n \"device_id\": self.device_id,\n \"device_type\": self.device_type,\n \"tap_if_name\": self.tap_if_name,\n \"xpos\": 5.0,\n \"ypos\": 3.0\n }\n self.test_net_namespace = \"testns\"\n self.network_svc = NetworkSimulatorService(self.test_net_namespace)\n\n def test_deviceStrRepresentation(self):\n device = Device(self.device_data_dict)\n str_rep = \"{},{},{}\".format(self.device_id, self.device_type, self.tap_if_name)\n self.assertEqual(str_rep, str(device))\n\n def test_registerDeviceTwice(self):\n self.network_svc.devices[self.device_id] = \"\"\n with self.assertRaises(DeviceAlreadyRegisteredException):\n self.network_svc.register_new_device(self.device_data_dict)\n\n def test_registerNewDevice(self):\n self.network_svc.register_new_device(self.device_data_dict)\n self.assertIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterDevice(self):\n self.network_svc.devices[self.device_id] = \"\"\n self.network_svc.deregister_device(self.device_id)\n self.assertNotIn(self.device_id, self.network_svc.devices.keys())\n\n def test_deregisterInvalidDevice(self):\n with self.assertRaises(UnknownDeviceException):\n self.network_svc.deregister_device(self.device_id)\n\n # helper\n def create_device(self, device_id):\n device = Device(self.device_data_dict)\n device.device_id = device_id\n device.xpos = 
4.0\n device.ypos = 3.0\n return device\n\n\n\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@public
def Main() ->int:
a = 'just a test'
return len(a)
<|reserved_special_token_1|>
from boa3.builtin import public
@public
def Main() ->int:
    """Return the length of a fixed test string (always 11)."""
    message = 'just a test'
    return len(message)
|
flexible
|
{
"blob_id": "e44e19dbeb6e1e346ca371ca8730f53ee5b95d47",
"index": 5402,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@public\ndef Main() ->int:\n a = 'just a test'\n return len(a)\n",
"step-3": "from boa3.builtin import public\n\n\n@public\ndef Main() ->int:\n a = 'just a test'\n return len(a)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Demonstrate EAFP dict access: looking up a key in an empty dict raises
# KeyError, which the handler reports instead of crashing.
dic = dict()
try:
    value = dic[55]
    print(value)
except Exception as err:
    print('Mensagem: ', err)
|
normal
|
{
"blob_id": "618aa64c08ebf8d9a0bc9662195ece2bbd485c17",
"index": 1079,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n print(dic[55])\nexcept Exception as err:\n print('Mensagem: ', err)\n",
"step-3": "dic = {}\ntry:\n print(dic[55])\nexcept Exception as err:\n print('Mensagem: ', err)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from flask_restful import Resource, reqparse
import sqlite3
from flask_jwt import jwt_required
from models.item_model import ItemModel
from flask_sqlalchemy import SQLAlchemy
from d import db
from models.store_model import StoreModel
class Modell(Resource):
    """REST resource exposing store CRUD operations keyed by store name."""

    def get(self, name):
        """Return the named store's JSON, or a not-found message.

        The original called ``item.json()`` unconditionally, crashing
        with AttributeError when ``find_by_name`` returned ``None``.
        """
        store = StoreModel.find_by_name(name)
        if store:
            return store.json()
        return {"message": "store not found"}, 404

    def post(self, name):
        """Create a store; reject if the name is already taken."""
        if StoreModel.find_by_name(name):
            return {"message": "sorry no store available in this name"}
        store = StoreModel(name)
        store.save_to_db()
        return store.json()

    def put(self, name):
        """Upsert: create the store if absent, then persist and return it.

        The original crashed with AttributeError when the store did not
        exist; PUT is conventionally an upsert.
        """
        store = StoreModel.find_by_name(name)
        if store is None:
            store = StoreModel(name)
        store.save_to_db()
        return store.json()

    def delete(self, name):
        """Delete the named store, reporting honestly when it is missing.

        The original always returned a (misspelled) success message even
        when nothing was deleted.
        """
        store = StoreModel.find_by_name(name)
        if store:
            store.delete_from_db()
            return {"m": "deleted successfully"}
        return {"m": "store not found"}, 404
class Storelist(Resource):
    """REST resource listing every store."""

    def get(self):
        """Return all stores as JSON-serializable dicts.

        The original returned raw model instances, which Flask cannot
        serialize; use each store's ``json()`` view, consistent with the
        responses produced by ``Modell``.
        """
        return {"item": [store.json() for store in StoreModel.query.all()]}
|
normal
|
{
"blob_id": "5616ec135a2233e742ff3b2b1f378ec12298b935",
"index": 9578,
"step-1": "<mask token>\n\n\nclass Modell(Resource):\n <mask token>\n <mask token>\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-2": "<mask token>\n\n\nclass Modell(Resource):\n <mask token>\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-3": "<mask token>\n\n\nclass Modell(Resource):\n\n def get(self, name):\n item = StoreModel.find_by_name(name)\n return item.json()\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-4": "from flask_restful import Resource, reqparse\nimport sqlite3\nfrom flask_jwt import jwt_required\nfrom models.item_model import ItemModel\nfrom flask_sqlalchemy import SQLAlchemy\nfrom d import db\nfrom models.store_model import StoreModel\n\n\nclass Modell(Resource):\n\n def get(self, name):\n item = StoreModel.find_by_name(name)\n return item.json()\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n\n def delete(self, name):\n item = StoreModel.find_by_name(name)\n if item:\n item.delete_from_db()\n return {'m': 'delted successfully'}\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-5": "from flask_restful import Resource, reqparse\r\nimport sqlite3\r\nfrom flask_jwt import jwt_required\r\nfrom models.item_model import ItemModel\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom d import db\r\nfrom models.store_model import StoreModel\r\n\r\n\r\nclass Modell(Resource):\r\n\r\n\r\n def get(self, name):\r\n item = StoreModel.find_by_name(name)\r\n return item.json()\r\n\r\n\r\n def post(self, name):\r\n if StoreModel.find_by_name(name):\r\n return {\"message\": \"sorry no store available in this name\"}\r\n #data = Modell.requested.parse_args()\r\n item = StoreModel(name)\r\n item.save_to_db()\r\n return item.json()\r\n\r\n\r\n def put(self, name):\r\n# data = Modell.requested.parse_args()\r\n item = StoreModel.find_by_name(name)\r\n\r\n\r\n\r\n item.save_to_db()\r\n return item.json()\r\n\r\n\r\n def delete(self, name):\r\n item=StoreModel.find_by_name(name)\r\n if item:\r\n item.delete_from_db()\r\n return {\"m\":\"delted successfully\"}\r\n\r\n\r\n\r\nclass Storelist(Resource):\r\n\r\n def get(self):\r\n return {\"item\":[x for x in StoreModel.query.all()]}",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
from mbc import MBC
import random
import sys
from typing import Dict
from interface import Interface
from reg import Register, HandlerProxy
# I/O Registers — memory-mapped hardware register addresses.
IE = 0xFFFF   # Interrupt Enable
DIV = 0xFF04  # Divider register
TIMA= 0xFF05  # Timer counter
TMA = 0xFF06  # Timer modulo (reload value for TIMA)
TAC = 0xFF07  # Timer control
IF = 0xFF0F   # Interrupt Flag
LY = 0xFF44   # Current LCD scanline
class MMU():
    """Memory management unit for the emulated 16-bit address space.

    Maps reads and writes onto ROM banks (served via the cartridge MBC),
    VRAM, external/work RAM, echo RAM, OAM, I/O registers (with optional
    per-address handler objects) and HiRAM.
    """

    # Memory map:
    #0000	3FFF	16KB ROM bank 00	From cartridge, usually a fixed bank
    #4000	7FFF	16KB ROM Bank 01~NN	From cartridge, switchable bank via MBC (if any)
    #8000	9FFF	8KB Video RAM (VRAM)	Only bank 0 in Non-CGB mode
    #Switchable bank 0/1 in CGB mode
    #
    #A000	BFFF	8KB External RAM	In cartridge, switchable bank if any
    #C000	CFFF	4KB Work RAM (WRAM) bank 0
    #D000	DFFF	4KB Work RAM (WRAM) bank 1~N	Only bank 1 in Non-CGB mode
    #Switchable bank 1~7 in CGB mode
    #
    #E000	FDFF	Mirror of C000~DDFF (ECHO RAM)	Typically not used
    #FE00	FE9F	Sprite attribute table (OAM)
    #FEA0	FEFF	Not Usable
    #FF00	FF7F	I/O Registers
    #FF80	FFFE	High RAM (HRAM)
    #FFFF	FFFF	Interrupts Enable Register (IE)
    def __init__(self, interface:Interface, mbc:MBC) -> None:
        """Build the flat 64KB memory, carve per-region views, and wire
        the MBC bank windows plus the default I/O handlers (OAM DMA at
        FF46, bootrom disable at FF50)."""
        self._ui = interface
        self.mem = bytearray(random.getrandbits(8) for _ in range(65536)) # type: ignore # Randomise RAM
        view = memoryview(self.mem)
        # Zero-copy views over each mapped region of the address space.
        self._rom0 = view[0:0x4000]
        self._rom1 = view[0x4000:0x8000]
        self._vram = view[0x8000:0xA000]
        self._eram = view[0xA000:0xC000]
        self._wram = view[0xC000:0xE000]
        self._wram2 = view[0xE000:0xFE00]
        self.OAM = view[0xFE00:0xFEA0]
        self.IO = view[0xFF00:0xFF80]
        self._HiRAM = view[0xFF80:0xFFFF]
        self.view = view
        self.mbc = mbc
        # The MBC fills ROM reads through these two bank windows.
        self.mbc.bank0 = self._rom0
        self.mbc.bank1 = self._rom1
        self.view[0xFE00:0xFFFF] = bytearray([0x00 for _ in range(0x1FF)]) # IO, etc defaults to blank
        self.mem[0xFFFF] = 0xFF # IE
        # Serial link state: last byte written to FF01 and the line
        # buffer echoed to stderr on newline (used by test ROMs).
        self.link_buffer = 0
        self.serial_buff = ""
        # Per-address I/O register handlers, consulted before raw IO RAM.
        self._io_handlers:Dict[int, Register] = {}
        self.add_io_handler(0xFF46, HandlerProxy(self.dma))
        # Add bootrom disable handler
        self.add_io_handler(0xFF50, HandlerProxy(self.mbc.disable_bootrom))

    def dma(self, val:int) -> None:
        """OAM DMA: copy 0xA0 bytes from page ``val * 0x100`` into OAM
        (FE00-FE9F). Triggered by a write to register FF46."""
        dest = 0xFE00
        offset = val * 0x100
        for n in range(0xA0):
            self.mem[dest + n] = self.mem[n + offset]

    def __getitem__(self, val:int) -> int:
        """Read one byte at address ``val``, honouring echo RAM, the
        unusable region, and registered I/O handlers.

        Raises ValueError for addresses above 0xFFFF."""
        if val < 0xE000:
            # ROM/VRAM/ERAM/WRAM are read straight from the flat array.
            return self.view[val]
        elif val < 0xFE00:
            # Echo RAM, subtract 0x2000
            return self.view[val-0x2000]
        elif val < 0xFE80:
            # NOTE(review): OAM spans FE00-FE9F on hardware, but this
            # branch only covers up to FE7F; FE80-FE9F falls through to
            # the 0xFF branch below — confirm intended.
            return self.OAM[val-0xFE00]
        elif val < 0xFF00:
            # Unusable region reads back as 0xFF.
            return 0xFF
        elif val < 0xFF80:
            # I/O registers: handler object wins, then joypad, then raw IO RAM.
            if val in self._io_handlers:
                return self._io_handlers[val].value
            elif val == 0xFF00:
                return self._ui.input
            else:
                return self.IO[val-0xFF00]
        elif val < 0xFFFF:
            return self._HiRAM[val-0xFF80]
        elif val == 0xFFFF:
            # IE register.
            return self.mem[0xFFFF]
        raise ValueError("Access out of bounds")

    def __setitem__(self, key:int, val:int) -> None:
        """Write one byte at address ``key``.

        ROM-range writes go to the MBC (bank switching); ERAM writes are
        gated on the MBC's RAM-enable flag; serial-port writes (FF01/02)
        feed the stderr test-ROM console."""
        if key < 0x8000:
            # ROM area: writes are MBC control commands, not memory writes.
            self.mbc[key] = val
        elif key < 0xA000:
            self._vram[key-0x8000] = val
        elif key < 0xC000:
            # External RAM is silently ignored unless the MBC enabled it.
            if self.mbc.ram_enabled:
                # TODO: Read $0x149 and determine RAM Size
                # TODO: Pass to MBC
                self._eram[key-0xA000] = val
        elif key < 0xE000:
            self._wram[key-0xC000] = val
        elif key < 0xFE00:
            # Echo RAM: mirror the write into WRAM (E000 -> C000).
            self._wram[key-0xE000] = val
        elif key < 0xFEA0:
            self.OAM[key-0xFE00] = val
        elif key < 0xFF00:
            # Unusable region: writes are dropped.
            pass
        elif key < 0xFF80:
            # Handler (if any) observes the write; the chain below also
            # updates joypad/serial state or raw IO RAM.
            if key in self._io_handlers:
                self._io_handlers[key].value = val
            if key == 0xFF00:
                self._ui.input = val
            elif key == 0xFF01:
                # Serial data byte, latched until a transfer is started.
                self.link_buffer = val
            elif key == 0xFF02:
                # Serial control: 0x81 = start transfer with internal clock.
                if val == 0x81:
                    self.serial_buff += chr(self.link_buffer)
                    if self.link_buffer == ord("\n"):
                        print(self.serial_buff, end='', file=sys.stderr)
                        # Test ROM Routines
                        if self.serial_buff == "Passed\n":
                            #sys.exit(0)
                            pass
                        elif self.serial_buff == "Failed\n":
                            #sys.exit(1)
                            pass
                        self.serial_buff = ""
            else:
                self.IO[key-0xFF00] = val
        elif key < 0xFFFF:
            self._HiRAM[key-0xFF80] = val
        else:
            # IE register.
            self.mem[65535] = val

    def add_io_handler(self, val:int, handler:Register) -> None:
        """Register ``handler`` to service reads/writes at I/O address ``val``."""
        self._io_handlers[val] = handler
|
normal
|
{
"blob_id": "1a7363736076620b7704d7264b2f0bb24514165c",
"index": 9816,
"step-1": "<mask token>\n\n\nclass MMU:\n <mask token>\n\n def dma(self, val: int) ->None:\n dest = 65024\n offset = val * 256\n for n in range(160):\n self.mem[dest + n] = self.mem[n + offset]\n <mask token>\n\n def __setitem__(self, key: int, val: int) ->None:\n if key < 32768:\n self.mbc[key] = val\n elif key < 40960:\n self._vram[key - 32768] = val\n elif key < 49152:\n if self.mbc.ram_enabled:\n self._eram[key - 40960] = val\n elif key < 57344:\n self._wram[key - 49152] = val\n elif key < 65024:\n self._wram[key - 57344] = val\n elif key < 65184:\n self.OAM[key - 65024] = val\n elif key < 65280:\n pass\n elif key < 65408:\n if key in self._io_handlers:\n self._io_handlers[key].value = val\n if key == 65280:\n self._ui.input = val\n elif key == 65281:\n self.link_buffer = val\n elif key == 65282:\n if val == 129:\n self.serial_buff += chr(self.link_buffer)\n if self.link_buffer == ord('\\n'):\n print(self.serial_buff, end='', file=sys.stderr)\n if self.serial_buff == 'Passed\\n':\n pass\n elif self.serial_buff == 'Failed\\n':\n pass\n self.serial_buff = ''\n else:\n self.IO[key - 65280] = val\n elif key < 65535:\n self._HiRAM[key - 65408] = val\n else:\n self.mem[65535] = val\n\n def add_io_handler(self, val: int, handler: Register) ->None:\n self._io_handlers[val] = handler\n",
"step-2": "<mask token>\n\n\nclass MMU:\n\n def __init__(self, interface: Interface, mbc: MBC) ->None:\n self._ui = interface\n self.mem = bytearray(random.getrandbits(8) for _ in range(65536))\n view = memoryview(self.mem)\n self._rom0 = view[0:16384]\n self._rom1 = view[16384:32768]\n self._vram = view[32768:40960]\n self._eram = view[40960:49152]\n self._wram = view[49152:57344]\n self._wram2 = view[57344:65024]\n self.OAM = view[65024:65184]\n self.IO = view[65280:65408]\n self._HiRAM = view[65408:65535]\n self.view = view\n self.mbc = mbc\n self.mbc.bank0 = self._rom0\n self.mbc.bank1 = self._rom1\n self.view[65024:65535] = bytearray([(0) for _ in range(511)])\n self.mem[65535] = 255\n self.link_buffer = 0\n self.serial_buff = ''\n self._io_handlers: Dict[int, Register] = {}\n self.add_io_handler(65350, HandlerProxy(self.dma))\n self.add_io_handler(65360, HandlerProxy(self.mbc.disable_bootrom))\n\n def dma(self, val: int) ->None:\n dest = 65024\n offset = val * 256\n for n in range(160):\n self.mem[dest + n] = self.mem[n + offset]\n\n def __getitem__(self, val: int) ->int:\n if val < 57344:\n return self.view[val]\n elif val < 65024:\n return self.view[val - 8192]\n elif val < 65152:\n return self.OAM[val - 65024]\n elif val < 65280:\n return 255\n elif val < 65408:\n if val in self._io_handlers:\n return self._io_handlers[val].value\n elif val == 65280:\n return self._ui.input\n else:\n return self.IO[val - 65280]\n elif val < 65535:\n return self._HiRAM[val - 65408]\n elif val == 65535:\n return self.mem[65535]\n raise ValueError('Access out of bounds')\n\n def __setitem__(self, key: int, val: int) ->None:\n if key < 32768:\n self.mbc[key] = val\n elif key < 40960:\n self._vram[key - 32768] = val\n elif key < 49152:\n if self.mbc.ram_enabled:\n self._eram[key - 40960] = val\n elif key < 57344:\n self._wram[key - 49152] = val\n elif key < 65024:\n self._wram[key - 57344] = val\n elif key < 65184:\n self.OAM[key - 65024] = val\n elif key < 65280:\n pass\n elif 
key < 65408:\n if key in self._io_handlers:\n self._io_handlers[key].value = val\n if key == 65280:\n self._ui.input = val\n elif key == 65281:\n self.link_buffer = val\n elif key == 65282:\n if val == 129:\n self.serial_buff += chr(self.link_buffer)\n if self.link_buffer == ord('\\n'):\n print(self.serial_buff, end='', file=sys.stderr)\n if self.serial_buff == 'Passed\\n':\n pass\n elif self.serial_buff == 'Failed\\n':\n pass\n self.serial_buff = ''\n else:\n self.IO[key - 65280] = val\n elif key < 65535:\n self._HiRAM[key - 65408] = val\n else:\n self.mem[65535] = val\n\n def add_io_handler(self, val: int, handler: Register) ->None:\n self._io_handlers[val] = handler\n",
"step-3": "<mask token>\nIE = 65535\nDIV = 65284\nTIMA = 65285\nTMA = 65286\nTAC = 65287\nIF = 65295\nLY = 65348\n\n\nclass MMU:\n\n def __init__(self, interface: Interface, mbc: MBC) ->None:\n self._ui = interface\n self.mem = bytearray(random.getrandbits(8) for _ in range(65536))\n view = memoryview(self.mem)\n self._rom0 = view[0:16384]\n self._rom1 = view[16384:32768]\n self._vram = view[32768:40960]\n self._eram = view[40960:49152]\n self._wram = view[49152:57344]\n self._wram2 = view[57344:65024]\n self.OAM = view[65024:65184]\n self.IO = view[65280:65408]\n self._HiRAM = view[65408:65535]\n self.view = view\n self.mbc = mbc\n self.mbc.bank0 = self._rom0\n self.mbc.bank1 = self._rom1\n self.view[65024:65535] = bytearray([(0) for _ in range(511)])\n self.mem[65535] = 255\n self.link_buffer = 0\n self.serial_buff = ''\n self._io_handlers: Dict[int, Register] = {}\n self.add_io_handler(65350, HandlerProxy(self.dma))\n self.add_io_handler(65360, HandlerProxy(self.mbc.disable_bootrom))\n\n def dma(self, val: int) ->None:\n dest = 65024\n offset = val * 256\n for n in range(160):\n self.mem[dest + n] = self.mem[n + offset]\n\n def __getitem__(self, val: int) ->int:\n if val < 57344:\n return self.view[val]\n elif val < 65024:\n return self.view[val - 8192]\n elif val < 65152:\n return self.OAM[val - 65024]\n elif val < 65280:\n return 255\n elif val < 65408:\n if val in self._io_handlers:\n return self._io_handlers[val].value\n elif val == 65280:\n return self._ui.input\n else:\n return self.IO[val - 65280]\n elif val < 65535:\n return self._HiRAM[val - 65408]\n elif val == 65535:\n return self.mem[65535]\n raise ValueError('Access out of bounds')\n\n def __setitem__(self, key: int, val: int) ->None:\n if key < 32768:\n self.mbc[key] = val\n elif key < 40960:\n self._vram[key - 32768] = val\n elif key < 49152:\n if self.mbc.ram_enabled:\n self._eram[key - 40960] = val\n elif key < 57344:\n self._wram[key - 49152] = val\n elif key < 65024:\n self._wram[key - 57344] 
= val\n elif key < 65184:\n self.OAM[key - 65024] = val\n elif key < 65280:\n pass\n elif key < 65408:\n if key in self._io_handlers:\n self._io_handlers[key].value = val\n if key == 65280:\n self._ui.input = val\n elif key == 65281:\n self.link_buffer = val\n elif key == 65282:\n if val == 129:\n self.serial_buff += chr(self.link_buffer)\n if self.link_buffer == ord('\\n'):\n print(self.serial_buff, end='', file=sys.stderr)\n if self.serial_buff == 'Passed\\n':\n pass\n elif self.serial_buff == 'Failed\\n':\n pass\n self.serial_buff = ''\n else:\n self.IO[key - 65280] = val\n elif key < 65535:\n self._HiRAM[key - 65408] = val\n else:\n self.mem[65535] = val\n\n def add_io_handler(self, val: int, handler: Register) ->None:\n self._io_handlers[val] = handler\n",
"step-4": "from mbc import MBC\nimport random\nimport sys\nfrom typing import Dict\nfrom interface import Interface\nfrom reg import Register, HandlerProxy\nIE = 65535\nDIV = 65284\nTIMA = 65285\nTMA = 65286\nTAC = 65287\nIF = 65295\nLY = 65348\n\n\nclass MMU:\n\n def __init__(self, interface: Interface, mbc: MBC) ->None:\n self._ui = interface\n self.mem = bytearray(random.getrandbits(8) for _ in range(65536))\n view = memoryview(self.mem)\n self._rom0 = view[0:16384]\n self._rom1 = view[16384:32768]\n self._vram = view[32768:40960]\n self._eram = view[40960:49152]\n self._wram = view[49152:57344]\n self._wram2 = view[57344:65024]\n self.OAM = view[65024:65184]\n self.IO = view[65280:65408]\n self._HiRAM = view[65408:65535]\n self.view = view\n self.mbc = mbc\n self.mbc.bank0 = self._rom0\n self.mbc.bank1 = self._rom1\n self.view[65024:65535] = bytearray([(0) for _ in range(511)])\n self.mem[65535] = 255\n self.link_buffer = 0\n self.serial_buff = ''\n self._io_handlers: Dict[int, Register] = {}\n self.add_io_handler(65350, HandlerProxy(self.dma))\n self.add_io_handler(65360, HandlerProxy(self.mbc.disable_bootrom))\n\n def dma(self, val: int) ->None:\n dest = 65024\n offset = val * 256\n for n in range(160):\n self.mem[dest + n] = self.mem[n + offset]\n\n def __getitem__(self, val: int) ->int:\n if val < 57344:\n return self.view[val]\n elif val < 65024:\n return self.view[val - 8192]\n elif val < 65152:\n return self.OAM[val - 65024]\n elif val < 65280:\n return 255\n elif val < 65408:\n if val in self._io_handlers:\n return self._io_handlers[val].value\n elif val == 65280:\n return self._ui.input\n else:\n return self.IO[val - 65280]\n elif val < 65535:\n return self._HiRAM[val - 65408]\n elif val == 65535:\n return self.mem[65535]\n raise ValueError('Access out of bounds')\n\n def __setitem__(self, key: int, val: int) ->None:\n if key < 32768:\n self.mbc[key] = val\n elif key < 40960:\n self._vram[key - 32768] = val\n elif key < 49152:\n if 
self.mbc.ram_enabled:\n self._eram[key - 40960] = val\n elif key < 57344:\n self._wram[key - 49152] = val\n elif key < 65024:\n self._wram[key - 57344] = val\n elif key < 65184:\n self.OAM[key - 65024] = val\n elif key < 65280:\n pass\n elif key < 65408:\n if key in self._io_handlers:\n self._io_handlers[key].value = val\n if key == 65280:\n self._ui.input = val\n elif key == 65281:\n self.link_buffer = val\n elif key == 65282:\n if val == 129:\n self.serial_buff += chr(self.link_buffer)\n if self.link_buffer == ord('\\n'):\n print(self.serial_buff, end='', file=sys.stderr)\n if self.serial_buff == 'Passed\\n':\n pass\n elif self.serial_buff == 'Failed\\n':\n pass\n self.serial_buff = ''\n else:\n self.IO[key - 65280] = val\n elif key < 65535:\n self._HiRAM[key - 65408] = val\n else:\n self.mem[65535] = val\n\n def add_io_handler(self, val: int, handler: Register) ->None:\n self._io_handlers[val] = handler\n",
"step-5": "from mbc import MBC\nimport random\nimport sys\nfrom typing import Dict\n\nfrom interface import Interface\nfrom reg import Register, HandlerProxy\n\n# I/O Registers\nIE = 0xFFFF\nDIV = 0xFF04 \nTIMA= 0xFF05\nTMA = 0xFF06\nTAC = 0xFF07\nIF = 0xFF0F\nLY = 0xFF44\n\n\n\nclass MMU():\n\n #0000\t3FFF\t16KB ROM bank 00\tFrom cartridge, usually a fixed bank\n #4000\t7FFF\t16KB ROM Bank 01~NN\tFrom cartridge, switchable bank via MBC (if any)\n #8000\t9FFF\t8KB Video RAM (VRAM)\tOnly bank 0 in Non-CGB mode\n #Switchable bank 0/1 in CGB mode\n #\n #A000\tBFFF\t8KB External RAM\tIn cartridge, switchable bank if any\n #C000\tCFFF\t4KB Work RAM (WRAM) bank 0\t\n #D000\tDFFF\t4KB Work RAM (WRAM) bank 1~N\tOnly bank 1 in Non-CGB mode\n #Switchable bank 1~7 in CGB mode\n #\n #E000\tFDFF\tMirror of C000~DDFF (ECHO RAM)\tTypically not used\n #FE00\tFE9F\tSprite attribute table (OAM)\t\n #FEA0\tFEFF\tNot Usable\t\n #FF00\tFF7F\tI/O Registers\t\n #FF80\tFFFE\tHigh RAM (HRAM)\t\n #FFFF\tFFFF\tInterrupts Enable Register (IE)\n\n def __init__(self, interface:Interface, mbc:MBC) -> None:\n self._ui = interface\n\n self.mem = bytearray(random.getrandbits(8) for _ in range(65536)) # type: ignore # Randomise RAM\n view = memoryview(self.mem)\n self._rom0 = view[0:0x4000]\n self._rom1 = view[0x4000:0x8000]\n self._vram = view[0x8000:0xA000]\n self._eram = view[0xA000:0xC000]\n self._wram = view[0xC000:0xE000]\n self._wram2 = view[0xE000:0xFE00]\n self.OAM = view[0xFE00:0xFEA0]\n self.IO = view[0xFF00:0xFF80]\n self._HiRAM = view[0xFF80:0xFFFF]\n\n self.view = view\n self.mbc = mbc\n self.mbc.bank0 = self._rom0\n self.mbc.bank1 = self._rom1\n\n self.view[0xFE00:0xFFFF] = bytearray([0x00 for _ in range(0x1FF)]) # IO, etc defaults to blank\n self.mem[0xFFFF] = 0xFF # IE\n\n self.link_buffer = 0\n\n self.serial_buff = \"\"\n self._io_handlers:Dict[int, Register] = {}\n self.add_io_handler(0xFF46, HandlerProxy(self.dma))\n # Add bootrom disable handler\n self.add_io_handler(0xFF50, 
HandlerProxy(self.mbc.disable_bootrom))\n\n def dma(self, val:int) -> None:\n dest = 0xFE00\n offset = val * 0x100\n for n in range(0xA0):\n self.mem[dest + n] = self.mem[n + offset]\n\n def __getitem__(self, val:int) -> int:\n if val < 0xE000:\n return self.view[val]\n elif val < 0xFE00:\n # Echo RAM, subtract 0x2000\n return self.view[val-0x2000]\n elif val < 0xFE80:\n return self.OAM[val-0xFE00]\n elif val < 0xFF00:\n return 0xFF\n elif val < 0xFF80:\n if val in self._io_handlers:\n return self._io_handlers[val].value\n elif val == 0xFF00:\n return self._ui.input\n else:\n return self.IO[val-0xFF00]\n elif val < 0xFFFF:\n return self._HiRAM[val-0xFF80]\n elif val == 0xFFFF:\n return self.mem[0xFFFF]\n raise ValueError(\"Access out of bounds\")\n\n def __setitem__(self, key:int, val:int) -> None:\n if key < 0x8000:\n self.mbc[key] = val\n elif key < 0xA000:\n\t self._vram[key-0x8000] = val\n elif key < 0xC000:\n if self.mbc.ram_enabled:\n # TODO: Read $0x149 and determine RAM Size\n # TODO: Pass to MBC\n self._eram[key-0xA000] = val\n elif key < 0xE000:\n\t self._wram[key-0xC000] = val\n elif key < 0xFE00:\n\t self._wram[key-0xE000] = val\n elif key < 0xFEA0:\n\t self.OAM[key-0xFE00] = val\n elif key < 0xFF00:\n pass\n elif key < 0xFF80:\n if key in self._io_handlers:\n self._io_handlers[key].value = val\n if key == 0xFF00:\n self._ui.input = val\n elif key == 0xFF01:\n self.link_buffer = val\n elif key == 0xFF02:\n if val == 0x81:\n self.serial_buff += chr(self.link_buffer)\n if self.link_buffer == ord(\"\\n\"):\n print(self.serial_buff, end='', file=sys.stderr)\n # Test ROM Routines\n if self.serial_buff == \"Passed\\n\":\n #sys.exit(0)\n pass\n elif self.serial_buff == \"Failed\\n\":\n #sys.exit(1)\n pass\n self.serial_buff = \"\"\n else:\n self.IO[key-0xFF00] = val\n elif key < 0xFFFF:\n\t self._HiRAM[key-0xFF80] = val\n else:\n self.mem[65535] = val\n\n def add_io_handler(self, val:int, handler:Register) -> None:\n self._io_handlers[val] = handler\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import pygame
import wave
import threading
import numpy as np
import pylab
import struct
import io
from PIL import Image
import sounddevice as sd
# Audio spectrum processing
# voice.wav format: 8000 Hz sample rate, 16-bit, mono
class SpectrumMap:
    """Spectrum/waveform plotting for a pre-recorded WAV file.

    Expects ``voice.wav`` to be 8000 Hz sample rate, 16-bit, mono.
    """

    def __init__(self):
        FILENAME = 'Sound/SoundResource/voice.wav'
        self.wavefile = wave.open(FILENAME, 'r')
        self.nchannels = self.wavefile.getnchannels()
        self.sample_width = self.wavefile.getsampwidth()
        self.framerate = self.wavefile.getframerate()
        self.numframes = self.wavefile.getnframes()

    def seek(self, frame: int):
        """Position the WAV reader at the given frame index."""
        self.wavefile.setpos(frame)

    def _read_samples(self, count: int):
        """Read *count* frames from the WAV and return left-channel samples.

        Frames past EOF (or too short to unpack) are left as 0. This
        de-duplicates the reading loop previously repeated in ``map``
        and ``raw``.
        """
        y = np.zeros(count)
        for i in range(count):
            val = self.wavefile.readframes(1)
            left = val[0:2]
            try:
                y[i] = struct.unpack('h', left)[0]
            except struct.error:
                # Short/empty frame (e.g. EOF): keep the default 0 sample
                pass
        return y

    def map(self, count: int, clear: bool=True):
        """Render a spectrogram of the next *count* frames; returns a PIL image."""
        if clear:
            pylab.plt.clf()
        y = self._read_samples(count)
        data = io.BytesIO()
        pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)
        pylab.savefig(data)
        data.seek(0)
        image = Image.open(data)
        # Crop away the matplotlib axes/margins
        crop = image.crop((81, 59, 575, 426))
        return crop

    def raw(self, count, clear: bool=True):
        """Render a waveform plot of the next *count* frames; returns a PIL image."""
        if clear:
            pylab.plt.clf()
        y = self._read_samples(count)
        data = io.BytesIO()
        # Plot only the first half of the samples over the full 16-bit range
        y = y[:len(y)//2]
        pylab.plt.ylim(-32768, 32768)
        pylab.plot(range(count//2), y)
        pylab.savefig(data)
        data.seek(0)
        image = Image.open(data)
        # Crop away the matplotlib axes/margins
        crop = image.crop((81, 59, 575, 426))
        return crop

    @staticmethod
    def blend(sp1, sp2, count: int):
        """Overlay sp1's spectrogram with sp2's waveform at 50% alpha."""
        im1 = sp1.map(count, clear=True)
        im2 = sp2.raw(count, clear=False)
        res = Image.blend(im1, im2, 0.5)
        return res
# Audio spectrum processing - attempt at real-time recording
# 0 Microsoft 声音映射器 - Output, MME (0 in, 2 out)
# < 1 扬声器 (Realtek High Definition, MME (0 in, 2 out)
# 2 主声音驱动程序, Windows DirectSound (0 in, 2 out)
# 3 扬声器 (Realtek High Definition Audio), Windows DirectSound (0 in, 2 out)
# 4 扬声器 (Realtek High Definition Audio), Windows WASAPI (0 in, 2 out)
# 5 Speakers (Realtek HD Audio output), Windows WDM-KS (0 in, 6 out)
# 6 立体声混音 (Realtek HD Audio Stereo input), Windows WDM-KS (2 in, 0 out)
# 7 线路输入 (Realtek HD Audio Line input), Windows WDM-KS (2 in, 0 out)
# 8 FrontMic (Realtek HD Audio Front Mic input), Windows WDM-KS (2 in, 0 out)
# 9 麦克风 (Realtek HD Audio Mic input), Windows WDM-KS (2 in, 0 out)
# fs = 44100 # Hz
# length = 5 # s
# recording = sd.rec(frames=fs * length, samplerate=fs, blocking=True, channels=1)
class SpectrumMap2:
    """Spectrum/waveform plotting driven by live recording via sounddevice."""

    def __init__(self):
        devices = sd.query_devices()
        # Default to device 11 unless a stereo-mix ("立体声混音") input is found
        chosen = 11
        for idx, entry in enumerate(devices):
            if '立体声混音' in entry['name']:
                chosen = idx
        sd.default.device[0] = chosen
        print('采用', devices[chosen]['name'], '录音')
        self.nchannels = 1
        self.framerate = 44100

    def record(self, period: float):
        """Record *period* seconds of mono 16-bit audio; returns a 1-D array."""
        frame_count = int(self.framerate * period)
        recording = sd.rec(frames=frame_count, samplerate=self.framerate,
                           blocking=True, channels=self.nchannels,
                           dtype='int16')
        return recording.reshape((recording.size, ))

    def map(self, ndata, clear: bool=True):
        """Render a spectrogram of *ndata*; returns a PIL image (uncropped)."""
        if clear:
            pylab.plt.clf()
        buf = io.BytesIO()
        pylab.specgram(ndata, NFFT=32, Fs=self.framerate, noverlap=18)
        pylab.savefig(buf)
        buf.seek(0)
        return Image.open(buf)

    @staticmethod
    def raw(ndata, clear: bool=True):
        """Render a waveform plot of the first half of *ndata*; returns a PIL image."""
        if clear:
            pylab.plt.clf()
        total = len(ndata)
        buf = io.BytesIO()
        half = ndata[:total//2]
        pylab.plt.ylim(-32768, 32768)
        pylab.plot(range(total//2), half)
        pylab.savefig(buf)
        buf.seek(0)
        return Image.open(buf)

    def fetch(self, period: float):
        """Record for *period* seconds and return the waveform image."""
        samples = self.record(period)
        return self.raw(samples, clear=True)
class Sound:
    """Thin static wrapper around pygame's music mixer."""

    @staticmethod
    def load():
        """Initialise the mixer and load the background track."""
        music_path = 'Sound/SoundResource/world.execute(me);.mp3'
        pygame.mixer.init()
        pygame.mixer.music.load(music_path)

    @staticmethod
    def play():
        """Start (or restart) playback from the beginning."""
        pygame.mixer.music.play()

    @staticmethod
    def pause():
        """Pause playback, keeping the current position."""
        pygame.mixer.music.pause()

    @staticmethod
    def stop():
        """Stop playback entirely."""
        pygame.mixer.music.stop()
|
normal
|
{
"blob_id": "fbde00d727d7ea99d1a7704f46cb9850c8b210d7",
"index": 2610,
"step-1": "<mask token>\n\n\nclass SpectrumMap:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-2": "<mask token>\n\n\nclass SpectrumMap:\n <mask token>\n <mask token>\n <mask token>\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return 
res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-3": "<mask token>\n\n\nclass SpectrumMap:\n <mask token>\n\n def seek(self, frame: int):\n self.wavefile.setpos(frame)\n\n def map(self, count: int, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = 
Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-4": "import pygame\nimport wave\nimport threading\nimport numpy as np\nimport pylab\nimport struct\nimport io\nfrom PIL import Image\nimport sounddevice as sd\n\n\nclass SpectrumMap:\n\n def __init__(self):\n FILENAME = 'Sound/SoundResource/voice.wav'\n self.wavefile = wave.open(FILENAME, 'r')\n self.nchannels = self.wavefile.getnchannels()\n self.sample_width = self.wavefile.getsampwidth()\n self.framerate = self.wavefile.getframerate()\n self.numframes = self.wavefile.getnframes()\n\n def seek(self, frame: int):\n self.wavefile.setpos(frame)\n\n def map(self, count: int, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def 
record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-5": "import pygame\nimport wave\nimport threading\nimport numpy as np\nimport pylab\nimport struct\nimport io\nfrom PIL import Image\nimport sounddevice as sd\n\n\n# 处理音频频谱\n# voice.wav 格式:8000 rate 16bit 单声道\nclass SpectrumMap:\n def __init__(self):\n FILENAME = 'Sound/SoundResource/voice.wav'\n self.wavefile = wave.open(FILENAME, 'r')\n\n self.nchannels = self.wavefile.getnchannels()\n self.sample_width = self.wavefile.getsampwidth()\n self.framerate = self.wavefile.getframerate()\n self.numframes = self.wavefile.getnframes()\n\n def seek(self, frame: int):\n self.wavefile.setpos(frame)\n\n def map(self, count: int, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n # crop = image\n return crop\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n\n data = io.BytesIO()\n # y = abs(np.fft.fft(y) * self.nchannels)\n y = y[:len(y)//2]\n # pylab.specgram(y, NFFT=1024, Fs=self.framerate, noverlap=900)\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count//2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n # crop = image\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\n# 处理音频频谱 - 尝试实时录音\n# 0 Microsoft 声音映射器 - Output, MME (0 in, 2 out)\n# < 1 扬声器 (Realtek High Definition, MME (0 in, 2 
out)\n# 2 主声音驱动程序, Windows DirectSound (0 in, 2 out)\n# 3 扬声器 (Realtek High Definition Audio), Windows DirectSound (0 in, 2 out)\n# 4 扬声器 (Realtek High Definition Audio), Windows WASAPI (0 in, 2 out)\n# 5 Speakers (Realtek HD Audio output), Windows WDM-KS (0 in, 6 out)\n# 6 立体声混音 (Realtek HD Audio Stereo input), Windows WDM-KS (2 in, 0 out)\n# 7 线路输入 (Realtek HD Audio Line input), Windows WDM-KS (2 in, 0 out)\n# 8 FrontMic (Realtek HD Audio Front Mic input), Windows WDM-KS (2 in, 0 out)\n# 9 麦克风 (Realtek HD Audio Mic input), Windows WDM-KS (2 in, 0 out)\n\n# fs = 44100 # Hz\n# length = 5 # s\n# recording = sd.rec(frames=fs * length, samplerate=fs, blocking=True, channels=1)\nclass SpectrumMap2:\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period),\n samplerate=self.framerate, blocking=True, channels=self.nchannels, dtype='int16')\n return recording.reshape((recording.size, ))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n # crop = image.crop((81, 59, 575, 426))\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n\n data = io.BytesIO()\n # y = abs(np.fft.fft(y) * self.nchannels)\n y = y[:len(y)//2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count//2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n # crop = image.crop((81, 59, 575, 426))\n crop = image\n return crop\n\n # @staticmethod\n # def blend(sp1, sp2, ndata):\n # im1 = sp1.map(ndata, 
clear=True)\n # im2 = sp2.raw(ndata, clear=False)\n # res = Image.blend(im1, im2, 0.5)\n # return res\n\n def fetch(self, period: float):\n ndata = self.record(period)\n # im1 = self.map(ndata, clear=True)\n im2 = self.raw(ndata, clear=True)\n # res = Image.blend(im1, im2, 0.5)\n res = im2\n return res\n\n\nclass Sound:\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-ids": [
12,
14,
16,
18,
19
]
}
|
[
12,
14,
16,
18,
19
] |
from django import forms
from .models import Note
class NoteForm(forms.ModelForm):
    """Model-backed form exposing a Note's user-editable fields."""
    class Meta:
        # Only title/text are editable here; other Note columns are not exposed.
        model = Note
        fields = ['title', 'text']
class NoteFullForm(NoteForm):
    """Extended Note form adding an optional target id, file uploads, and tags.

    note_id being absent presumably means "create new" while a value means
    "edit existing" — TODO confirm against the consuming view.
    """
    # Optional primary key of the note being edited.
    note_id = forms.IntegerField(required=False)
    # NOTE(review): ClearableFileInput with attrs={'multiple': True} is rejected
    # by recent Django releases (multi-file uploads need a dedicated widget) —
    # confirm the project's Django version still accepts this.
    images = forms.FileField(widget=forms.ClearableFileInput(attrs={
        'multiple': True}), required=False)
    # Optional free-text tag string, capped at 50 characters.
    tags = forms.CharField(max_length=50, required=False)
    class Meta(NoteForm.Meta):
        # Extend the parent's field list rather than restating it.
        fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']
|
normal
|
{
"blob_id": "e0fd9663a5635873f4ffc0f73aff5106c0933781",
"index": 9180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NoteFullForm(NoteForm):\n note_id = forms.IntegerField(required=False)\n images = forms.FileField(widget=forms.ClearableFileInput(attrs={\n 'multiple': True}), required=False)\n tags = forms.CharField(max_length=50, required=False)\n\n\n class Meta(NoteForm.Meta):\n fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']\n",
"step-3": "<mask token>\n\n\nclass NoteForm(forms.ModelForm):\n\n\n class Meta:\n model = Note\n fields = ['title', 'text']\n\n\nclass NoteFullForm(NoteForm):\n note_id = forms.IntegerField(required=False)\n images = forms.FileField(widget=forms.ClearableFileInput(attrs={\n 'multiple': True}), required=False)\n tags = forms.CharField(max_length=50, required=False)\n\n\n class Meta(NoteForm.Meta):\n fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']\n",
"step-4": "from django import forms\nfrom .models import Note\n\n\nclass NoteForm(forms.ModelForm):\n\n\n class Meta:\n model = Note\n fields = ['title', 'text']\n\n\nclass NoteFullForm(NoteForm):\n note_id = forms.IntegerField(required=False)\n images = forms.FileField(widget=forms.ClearableFileInput(attrs={\n 'multiple': True}), required=False)\n tags = forms.CharField(max_length=50, required=False)\n\n\n class Meta(NoteForm.Meta):\n fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
from timeit import default_timer as timer
import numpy as np
# NOTE(review): bets1 and bets2 are never referenced below — the play loop
# builds its own balance-scaled `bets` ladder on every roll. Kept here in case
# parts of the file outside this view use them; confirm before removing.
bets1 = [ # lowest config possible
    0.00000001,
    0.00000004,
    0.0000001,
    0.0000005,
    0.00000150,
    0.00000500,
    0.00001000
]
bets2 = [ # 2 is 10x 1 (same ladder scaled up by a factor of 10)
    0.0000001,
    0.0000004,
    0.000001,
    0.000005,
    0.0000150,
    0.0000500,
    0.0001000
]
# options
max_seeds = 100  # number of independently seeded sessions to simulate
max_rolls = 100000 # 100k is around 8-24 hours of fastplay
# aggregate results across all sessions
seed_wins = 0   # sessions that hit the profit target (or survived to max_rolls)
num_rolls = []  # roll count recorded for each winning session
# unseeded draw, so each script run explores a fresh window of seeds
start_position = np.random.randint(1, 100000000)
# Simulate `max_seeds` martingale-style betting sessions, one per RNG seed.
# A session ends when profit exceeds 10,000%, the balance is exhausted, or the
# roll budget runs out. Statement order matters: every np.random call consumes
# the seeded stream, so do not reorder draws.
for seed in range(start_position, start_position+max_seeds):
    # current game round stats
    cur_wins = 0
    max_wins = 0
    cur_losses = 0
    max_losses = 0
    win_streak = []
    loss_streak = []
    # seed data and timer (deterministic per-seed replay)
    np.random.seed(seed)
    start_time = timer()
    start_bal = cur_bal = 0.001 # 10$ reasonable start
    # actual Play
    for index in range(max_rolls):
        # make bets: rebuilt each roll from the current balance; entry k is the
        # stake after k consecutive losses, rounded to 8 decimal places.
        bets = [ # this appears to be working, a function of cur_bal
            0.00000001,
            float('{:.8f}'.format(cur_bal * 0.001)),
            float('{:.8f}'.format(cur_bal * 0.002)),
            float('{:.8f}'.format(cur_bal * 0.005)),
            float('{:.8f}'.format(cur_bal * 0.01)),
            float('{:.8f}'.format(cur_bal * 0.05)),
            float('{:.8f}'.format(cur_bal * 0.12)),
            float('{:.8f}'.format(cur_bal * 0.3)),
        ]
        # if Winning... Stop
        # NOTE(review): reaching the final roll (index==max_rolls-1) is counted
        # as a "win" even if the session is not in profit — confirm intended.
        if (cur_bal / start_bal - 1)*100 > 10000 or index==max_rolls-1:
            print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(
                seed, index, cur_bal, (cur_bal/start_bal-1)*100))
            print('Max_L: {}'.format(max_losses))
            print('Max_W: {}'.format(max_wins))
            #print('Won The Day!')
            seed_wins += 1
            num_rolls.append(index)
            break
        # get bet: index the ladder by the current loss streak; past the end of
        # the ladder the stake resets to the minimum.
        if cur_losses < len(bets):
            bet = bets[cur_losses]
        else:
            bet = bets[0]
        if bet < bets[0]: # dont bet less than 8 decimal places
            bet = bets[0]
        # if Losing ... Stop (bankrupt, or the next stake exceeds the balance)
        if cur_bal <= 0:
            break
        if bet >= cur_bal:
            #print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(
            #    seed, index, cur_bal, (cur_bal/start_bal-1)*100))
            #print('Game Over man!')
            break
        ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> MAKE PLAY
        # roll is uniform over 1..9999; rolls 1..3899 win (~39% win chance).
        roll = np.random.randint(1, 10000)
        win = True if roll < 3900 else False ## 3900/10000 appears to be a good handicap
        ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        # fix balance: a win pays 2x the stake; a loss forfeits it.
        if win:
            loss_streak.append(cur_losses)
            cur_bal += bet * 2
            cur_losses = 0
            cur_wins += 1
        else:
            win_streak.append(cur_wins)
            cur_bal -= bet
            cur_losses += 1
            cur_wins = 0
        # fix maxes (longest streaks seen this session)
        if cur_losses > max_losses:
            max_losses = cur_losses
        if cur_wins > max_wins:
            max_wins = cur_wins
    # /actual play
    # seed stuff
    seed_time = timer() - start_time
    print('Seed_time: {:.2f}'.format(seed_time), end='\r') # you will see this a lot if losing
# Finished All Seeds
# NOTE(review): message says "1000%" but the stop threshold above is 10000% —
# one of the two is presumably a typo; confirm which.
print('Won {}/{} Seeds'.format(seed_wins,max_seeds))
if seed_wins: # if won anything.
    print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean())))
|
normal
|
{
"blob_id": "4c66ab6110e81bb88fc6916a1695e0f23e6e0e9d",
"index": 6754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor seed in range(start_position, start_position + max_seeds):\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n win_streak = []\n loss_streak = []\n np.random.seed(seed)\n start_time = timer()\n start_bal = cur_bal = 0.001\n for index in range(max_rolls):\n bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(\n '{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(\n cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(\n cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]\n if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'\n .format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n seed_wins += 1\n num_rolls.append(index)\n break\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]:\n bet = bets[0]\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n break\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r')\nprint('Won {}/{} Seeds'.format(seed_wins, max_seeds))\nif seed_wins:\n print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean()))\n )\n",
"step-3": "<mask token>\nbets1 = [1e-08, 4e-08, 1e-07, 5e-07, 1.5e-06, 5e-06, 1e-05]\nbets2 = [1e-07, 4e-07, 1e-06, 5e-06, 1.5e-05, 5e-05, 0.0001]\nmax_seeds = 100\nmax_rolls = 100000\nseed_wins = 0\nnum_rolls = []\nstart_position = np.random.randint(1, 100000000)\nfor seed in range(start_position, start_position + max_seeds):\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n win_streak = []\n loss_streak = []\n np.random.seed(seed)\n start_time = timer()\n start_bal = cur_bal = 0.001\n for index in range(max_rolls):\n bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(\n '{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(\n cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(\n cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]\n if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'\n .format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n seed_wins += 1\n num_rolls.append(index)\n break\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]:\n bet = bets[0]\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n break\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r')\nprint('Won {}/{} Seeds'.format(seed_wins, max_seeds))\nif seed_wins:\n print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean()))\n )\n",
"step-4": "from timeit import default_timer as timer\nimport numpy as np\nbets1 = [1e-08, 4e-08, 1e-07, 5e-07, 1.5e-06, 5e-06, 1e-05]\nbets2 = [1e-07, 4e-07, 1e-06, 5e-06, 1.5e-05, 5e-05, 0.0001]\nmax_seeds = 100\nmax_rolls = 100000\nseed_wins = 0\nnum_rolls = []\nstart_position = np.random.randint(1, 100000000)\nfor seed in range(start_position, start_position + max_seeds):\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n win_streak = []\n loss_streak = []\n np.random.seed(seed)\n start_time = timer()\n start_bal = cur_bal = 0.001\n for index in range(max_rolls):\n bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(\n '{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(\n cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(\n cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]\n if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'\n .format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n seed_wins += 1\n num_rolls.append(index)\n break\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]:\n bet = bets[0]\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n break\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r')\nprint('Won {}/{} Seeds'.format(seed_wins, max_seeds))\nif seed_wins:\n print('Avg # of rolls to 1000%: 
{}'.format(int(np.array(num_rolls).mean()))\n )\n",
"step-5": "from timeit import default_timer as timer\nimport numpy as np\n\nbets1 = [ # lowest config possible\n 0.00000001,\n 0.00000004,\n 0.0000001,\n 0.0000005,\n 0.00000150,\n 0.00000500,\n 0.00001000\n]\nbets2 = [ # 2 is 10x 1\n 0.0000001,\n 0.0000004,\n 0.000001,\n 0.000005,\n 0.0000150,\n 0.0000500,\n 0.0001000\n]\n\n# options\nmax_seeds = 100\nmax_rolls = 100000 # 100k is around 8-24 hours of fastplay\n\nseed_wins = 0\nnum_rolls = []\nstart_position = np.random.randint(1, 100000000)\n\n\n\nfor seed in range(start_position, start_position+max_seeds):\n # current game round stats\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n\n win_streak = []\n loss_streak = []\n # seed data and timer\n np.random.seed(seed)\n start_time = timer()\n\n start_bal = cur_bal = 0.001 # 10$ reasonable start\n # actual Play\n for index in range(max_rolls):\n # make bets\n bets = [ # this appears to be working, a function of cur_bal\n 0.00000001,\n float('{:.8f}'.format(cur_bal * 0.001)),\n float('{:.8f}'.format(cur_bal * 0.002)),\n float('{:.8f}'.format(cur_bal * 0.005)),\n float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)),\n float('{:.8f}'.format(cur_bal * 0.12)),\n float('{:.8f}'.format(cur_bal * 0.3)),\n ]\n\n # if Winning... Stop\n if (cur_bal / start_bal - 1)*100 > 10000 or index==max_rolls-1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(\n seed, index, cur_bal, (cur_bal/start_bal-1)*100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n #print('Won The Day!')\n seed_wins += 1\n num_rolls.append(index)\n break\n\n # get bet\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]: # dont bet less than 8 decimal places\n bet = bets[0]\n\n # if Losing ... 
Stop\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n #print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(\n # seed, index, cur_bal, (cur_bal/start_bal-1)*100))\n #print('Game Over man!')\n break\n\n ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> MAKE PLAY\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False ## 3900/10000 appears to be a good handicap\n ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n # fix balance\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n\n # fix maxes\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n # /actual play\n # seed stuff\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r') # you will see this a lot if losing\n# Finished All Seeds\nprint('Won {}/{} Seeds'.format(seed_wins,max_seeds))\nif seed_wins: # if won anything.\n print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean())))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from Player import Player
class GameSequence:
    """Keeps track of player turn order, interaction mode, and game lifecycle.

    Functionalities:
        - start game
        - start turns
        - change turns
        - end turns
        - switch interaction mode (move / attack / nothing)
    """

    # Interaction modes; the value doubles as the index into ``self.modes``.
    MOVE = 0
    ATTACK = 1
    NOTHING = 2

    def __init__(self, ArrayofPlayers):
        """Build a turn sequence over at least two players.

        Raises:
            ValueError: if fewer than two players are supplied.
        """
        # Bug fix: the original ``return False`` inside __init__ raised
        # ``TypeError: __init__() should return None`` at runtime; signal the
        # invalid roster explicitly instead.
        if len(ArrayofPlayers) < 2:
            raise ValueError('GameSequence requires at least two players')
        self.players = ArrayofPlayers
        self.currentTurn = None  # set to 0 by startGame()
        self.modes = [self.MOVE, self.ATTACK, self.NOTHING]
        self.currentMode = self.NOTHING

    def changeMode(self, number):
        """Switch the interaction mode (0=move, 1=attack, 2=nothing)."""
        self.currentMode = self.modes[number]

    def startGame(self):
        """Begin the game with the first player.

        Hook for an intro animation before play starts.
        """
        self.currentTurn = 0

    def startTurn(self):
        """Mark the current player's turn as active.

        Hook for a camera-move animation to the player's location.
        """
        self.players[self.currentTurn].changeTurn(True)

    def getCurrentPlayer(self):
        """Return the player whose turn it currently is."""
        return self.players[self.currentTurn]

    def changeTurn(self):
        """Deactivate the current player and advance to the next, wrapping."""
        self.players[self.currentTurn].changeTurn(False)
        self.currentTurn = (self.currentTurn + 1) % len(self.players)

    def endTurn(self):
        """Deactivate the current player without advancing the sequence."""
        self.players[self.currentTurn].changeTurn(False)
|
normal
|
{
"blob_id": "bdfd941be29a31d6c1bbedd270dadac844f49fc4",
"index": 1198,
"step-1": "<mask token>\n\n\nclass GameSequence:\n <mask token>\n <mask token>\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n <mask token>\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-2": "<mask token>\n\n\nclass GameSequence:\n <mask token>\n\n def __init__(self, ArrayofPlayers):\n if len(ArrayofPlayers) < 2:\n return False\n self.players = ArrayofPlayers\n self.currentTurn = None\n NOTHING = 2\n ATTACK = 1\n MOVE = 0\n self.modes = [MOVE, ATTACK, NOTHING]\n self.currentMode = NOTHING\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n <mask token>\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-3": "<mask token>\n\n\nclass GameSequence:\n <mask token>\n\n def __init__(self, ArrayofPlayers):\n if len(ArrayofPlayers) < 2:\n return False\n self.players = ArrayofPlayers\n self.currentTurn = None\n NOTHING = 2\n ATTACK = 1\n MOVE = 0\n self.modes = [MOVE, ATTACK, NOTHING]\n self.currentMode = NOTHING\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n\n def startTurn(self):\n self.players[self.currentTurn].changeTurn(True)\n \"\"\"\n maybe some camera change animation to player location\n \"\"\"\n return\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-4": "<mask token>\n\n\nclass GameSequence:\n \"\"\"\n GameSequence summary: Keeps track of player turn sequence and Game end\n Functionalities\n -start game\n -must start turns\n -change turns\n -end turns\n -end game\n\n \"\"\"\n\n def __init__(self, ArrayofPlayers):\n if len(ArrayofPlayers) < 2:\n return False\n self.players = ArrayofPlayers\n self.currentTurn = None\n NOTHING = 2\n ATTACK = 1\n MOVE = 0\n self.modes = [MOVE, ATTACK, NOTHING]\n self.currentMode = NOTHING\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n\n def startTurn(self):\n self.players[self.currentTurn].changeTurn(True)\n \"\"\"\n maybe some camera change animation to player location\n \"\"\"\n return\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-5": "from Player import Player\r\n\r\n\r\n\r\n\r\nclass GameSequence:\r\n '''\r\n GameSequence summary: Keeps track of player turn sequence and Game end\r\n Functionalities\r\n -start game\r\n -must start turns\r\n -change turns\r\n -end turns\r\n -end game\r\n\r\n '''\r\n\r\n def __init__(self, ArrayofPlayers):\r\n if (len(ArrayofPlayers) < 2):\r\n return False\r\n\r\n self.players = ArrayofPlayers\r\n self.currentTurn = None\r\n NOTHING = 2\r\n ATTACK = 1\r\n MOVE = 0\r\n\r\n self.modes = [MOVE, ATTACK,NOTHING]\r\n self.currentMode = NOTHING\r\n\r\n def changeMode(self,number):\r\n self.currentMode = self.modes[number]\r\n def startGame(self):\r\n self.currentTurn = 0\r\n '''\r\n does some intro animaton -> starts game\r\n\r\n '''\r\n return\r\n def startTurn(self):\r\n self.players[self.currentTurn].changeTurn(True)\r\n '''\r\n maybe some camera change animation to player location\r\n '''\r\n return\r\n\r\n def getCurrentPlayer(self):\r\n return self.players[self.currentTurn]\r\n\r\n def changeTurn(self):\r\n self.players[self.currentTurn].changeTurn(False)\r\n self.currentTurn += 1\r\n self.currentTurn = self.currentTurn % len(self.players)\r\n\r\n def endTurn(self):\r\n self.players[self.currentTurn].changeTurn(False)\r\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
n = int(input())
num = list(map(int, input().split()))
plus_cnt = 0
div_max = 0
for i in num:
div = 0
while i > 0:
if i % 2 == 0:
i //= 2
div += 1
else:
i -= 1
plus_cnt += 1
div_max = max(div_max, div)
print(plus_cnt + div_max)
|
normal
|
{
"blob_id": "9247896850e5282265cd08240f6f505e675ce5f0",
"index": 5904,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in num:\n div = 0\n while i > 0:\n if i % 2 == 0:\n i //= 2\n div += 1\n else:\n i -= 1\n plus_cnt += 1\n div_max = max(div_max, div)\nprint(plus_cnt + div_max)\n",
"step-3": "n = int(input())\nnum = list(map(int, input().split()))\nplus_cnt = 0\ndiv_max = 0\nfor i in num:\n div = 0\n while i > 0:\n if i % 2 == 0:\n i //= 2\n div += 1\n else:\n i -= 1\n plus_cnt += 1\n div_max = max(div_max, div)\nprint(plus_cnt + div_max)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if self.front == None:
self.front = self.last = new_nodef
self.count += 1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count += 1
<|reserved_special_token_0|>
def print_list(self):
if self.front == None:
return
temp = self.front
while temp != None:
print(temp.data)
temp = temp.next
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
<|reserved_special_token_0|>
def size(self):
print(self.count)
<|reserved_special_token_0|>
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if self.front == None:
self.front = self.last = new_nodef
self.count += 1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count += 1
<|reserved_special_token_0|>
def print_list(self):
if self.front == None:
return
temp = self.front
while temp != None:
print(temp.data)
temp = temp.next
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
def is_empty(self):
if self.count == 0:
return True
else:
return False
def size(self):
print(self.count)
def entry(self):
pal_to_check = str(input(
'Enter the string to check whether palindrome or not :'))
pal_list = [str(i) for i in pal_to_check]
print(pal_list)
pal_check_con = llist.pal_check(pal_list)
print('Is palindrome :', pal_check_con)
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
<|reserved_special_token_0|>
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if self.front == None:
self.front = self.last = new_nodef
self.count += 1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count += 1
def add_last(self, data):
new_nodeb = Node(data)
if self.last == None:
self.last = self.front = new_nodeb
self.count += 1
else:
new_nodeb.prev = self.last
self.last.next = new_nodeb
self.last = new_nodeb
self.count += 1
def print_list(self):
if self.front == None:
return
temp = self.front
while temp != None:
print(temp.data)
temp = temp.next
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
def is_empty(self):
if self.count == 0:
return True
else:
return False
def size(self):
print(self.count)
def entry(self):
pal_to_check = str(input(
'Enter the string to check whether palindrome or not :'))
pal_list = [str(i) for i in pal_to_check]
print(pal_list)
pal_check_con = llist.pal_check(pal_list)
print('Is palindrome :', pal_check_con)
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if self.front == None:
self.front = self.last = new_nodef
self.count += 1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count += 1
def add_last(self, data):
new_nodeb = Node(data)
if self.last == None:
self.last = self.front = new_nodeb
self.count += 1
else:
new_nodeb.prev = self.last
self.last.next = new_nodeb
self.last = new_nodeb
self.count += 1
def print_list(self):
if self.front == None:
return
temp = self.front
while temp != None:
print(temp.data)
temp = temp.next
def remove_front(self):
if self.front == None:
return
else:
self.front = self.front.next
if self.front == None:
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if self.last == None:
return
else:
self.last = self.last.prev
if self.last == None:
self.front = None
return
self.count -= 1
self.last.next = None
def is_empty(self):
if self.count == 0:
return True
else:
return False
def size(self):
print(self.count)
def entry(self):
pal_to_check = str(input(
'Enter the string to check whether palindrome or not :'))
pal_list = [str(i) for i in pal_to_check]
print(pal_list)
pal_check_con = llist.pal_check(pal_list)
print('Is palindrome :', pal_check_con)
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while self.count != 0:
if self.front.data == self.last.data:
llist.remove_front()
if self.count > 1:
llist.remove_last()
else:
return False
if self.count == 1:
break
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self,data):
self.data = data
self.next = None
self.prev = None
class dequeue:
def __init__(self):
self.front = None
self.last = None
self.count = 0
def add_front(self, data):
new_nodef = Node(data)
if(self.front == None):
self.front = self.last = new_nodef
self.count +=1
else:
new_nodef.next = self.front
self.front.prev = new_nodef
self.front = new_nodef
self.count +=1
def add_last(self,data):
new_nodeb = Node(data)
if(self.last == None):
self.last = self.front = new_nodeb
self.count +=1
else:
new_nodeb.prev = self.last
self.last.next = new_nodeb
self.last = new_nodeb
self.count +=1
def print_list(self):
if(self.front == None):
return
temp = self.front
while(temp != None):
print(temp.data)
temp = temp.next
def remove_front(self):
if(self.front == None):
return
else:
self.front = self.front.next
if(self.front == None):
self.last = None
return
self.count -= 1
self.front.prev = None
def remove_last(self):
if(self.last == None):
return
else:
self.last = self.last.prev
if(self.last == None):
self.front = None
return
self.count -= 1
self.last.next = None
def is_empty(self):
if(self.count == 0):
return True
else:
return False
def size(self):
print(self.count)
def entry(self):
pal_to_check = str(input("Enter the string to check whether palindrome or not :"))
pal_list = [str(i) for i in pal_to_check]
print(pal_list)
pal_check_con = llist.pal_check(pal_list)
print("Is palindrome :",pal_check_con)
def pal_check(self, pal_lis):
for i in pal_lis:
llist.add_front(i)
while(self.count != 0):
if(self.front.data == self.last.data):
llist.remove_front()
if(self.count > 1):
llist.remove_last()
else:
return False
if(self.count == 1):
break
return True
#Driver function
if __name__=="__main__":
llist = dequeue()
llist.entry()
|
flexible
|
{
"blob_id": "2f6e0b6a7e14ac9c5a38db6fd2b1cf23cff7144e",
"index": 172,
"step-1": "<mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n <mask token>\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n <mask token>\n\n def size(self):\n print(self.count)\n <mask token>\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n <mask token>\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-3": "class Node:\n <mask token>\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n\n def add_last(self, data):\n new_nodeb = Node(data)\n if self.last == None:\n self.last = self.front = new_nodeb\n self.count += 1\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb\n self.count += 1\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-4": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if self.front == None:\n self.front = self.last = new_nodef\n self.count += 1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count += 1\n\n def add_last(self, data):\n new_nodeb = Node(data)\n if self.last == None:\n self.last = self.front = new_nodeb\n self.count += 1\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb\n self.count += 1\n\n def print_list(self):\n if self.front == None:\n return\n temp = self.front\n while temp != None:\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if self.front == None:\n return\n else:\n self.front = self.front.next\n if self.front == None:\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if self.last == None:\n return\n else:\n self.last = self.last.prev\n if self.last == None:\n self.front = None\n return\n self.count -= 1\n self.last.next = None\n\n def is_empty(self):\n if self.count == 0:\n return True\n else:\n return False\n\n def size(self):\n print(self.count)\n\n def entry(self):\n pal_to_check = str(input(\n 'Enter the string to check whether palindrome or not :'))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print('Is palindrome :', pal_check_con)\n\n def pal_check(self, pal_lis):\n for i in pal_lis:\n llist.add_front(i)\n while self.count != 0:\n if self.front.data == self.last.data:\n llist.remove_front()\n if self.count > 1:\n llist.remove_last()\n else:\n return False\n if self.count == 1:\n break\n return True\n\n\n<mask token>\n",
"step-5": "class Node:\n\n def __init__(self,data):\n self.data = data\n self.next = None\n self.prev = None \n\nclass dequeue:\n\n def __init__(self):\n self.front = None\n self.last = None\n self.count = 0\n\n def add_front(self, data):\n new_nodef = Node(data)\n if(self.front == None):\n self.front = self.last = new_nodef\n self.count +=1\n else:\n new_nodef.next = self.front\n self.front.prev = new_nodef\n self.front = new_nodef\n self.count +=1\n\n \n def add_last(self,data):\n new_nodeb = Node(data)\n if(self.last == None):\n self.last = self.front = new_nodeb\n self.count +=1\n\n else:\n new_nodeb.prev = self.last\n self.last.next = new_nodeb\n self.last = new_nodeb \n self.count +=1\n \n def print_list(self):\n if(self.front == None):\n return\n temp = self.front\n while(temp != None):\n print(temp.data)\n temp = temp.next\n\n def remove_front(self):\n if(self.front == None):\n return\n else:\n self.front = self.front.next\n if(self.front == None):\n self.last = None\n return\n self.count -= 1\n self.front.prev = None\n\n def remove_last(self):\n if(self.last == None):\n return\n else:\n self.last = self.last.prev\n if(self.last == None):\n self.front = None\n return\n self.count -= 1 \n self.last.next = None\n \n def is_empty(self):\n if(self.count == 0):\n return True\n else: \n return False\n def size(self):\n print(self.count)\n \n\n def entry(self):\n \n pal_to_check = str(input(\"Enter the string to check whether palindrome or not :\"))\n pal_list = [str(i) for i in pal_to_check]\n print(pal_list)\n pal_check_con = llist.pal_check(pal_list)\n print(\"Is palindrome :\",pal_check_con)\n \n def pal_check(self, pal_lis): \n for i in pal_lis:\n llist.add_front(i)\n while(self.count != 0):\n if(self.front.data == self.last.data):\n llist.remove_front()\n if(self.count > 1):\n llist.remove_last() \n else:\n return False\n if(self.count == 1):\n break \n return True\n\n\n#Driver function\nif __name__==\"__main__\":\n \n llist = dequeue()\n llist.entry()\n\n",
"step-ids": [
8,
10,
12,
13,
15
]
}
|
[
8,
10,
12,
13,
15
] |
from django.db import models
#from publicservants import models
from django.utils.encoding import smart_unicode
# Create your models here.
class Score(models.Model):
#score ID - publicservant ID plus score
#sID = models.ManyToOneRel(field=PublicServant.psID)
#PS Score at time t
pst = models.IntegerField(null=False)
timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
#Factors that determine Public Servant Score, include Thumbs up or down on certain criterias.
#Aggregrate values for period of time
positivePersonality = models.IntegerField(null=False, blank=False)
negativePersonality = models.IntegerField(null=False, blank=False)
positiveReviewMentions = models.IntegerField(null=False, blank=False)
negativeReviewMentions = models.IntegerField(null=False, blank=False)
userScore= models.IntegerField(null=False, blank=False)
#Actual PSScore at 12am everyday
ps = models.IntegerField(null=False)
def __unicode__(self):
return smart_unicode(self.ps) # + smart_unicode(self.PublicServant.psID)
|
normal
|
{
"blob_id": "8c166dd4cb091dcd2d80b5ae3085b5dee77564e0",
"index": 1227,
"step-1": "<mask token>\n\n\nclass Score(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Score(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-3": "<mask token>\n\n\nclass Score(models.Model):\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n userScore = models.IntegerField(null=False, blank=False)\n ps = models.IntegerField(null=False)\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-4": "from django.db import models\nfrom django.utils.encoding import smart_unicode\n\n\nclass Score(models.Model):\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n userScore = models.IntegerField(null=False, blank=False)\n ps = models.IntegerField(null=False)\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-5": "from django.db import models\n#from publicservants import models\nfrom django.utils.encoding import smart_unicode\n\n# Create your models here.\n\n\nclass Score(models.Model):\n #score ID - publicservant ID plus score\n #sID = models.ManyToOneRel(field=PublicServant.psID)\n \n #PS Score at time t\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n \n #Factors that determine Public Servant Score, include Thumbs up or down on certain criterias.\n #Aggregrate values for period of time\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n \n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n \n userScore= models.IntegerField(null=False, blank=False)\n \n #Actual PSScore at 12am everyday\n ps = models.IntegerField(null=False)\n \n def __unicode__(self):\n return smart_unicode(self.ps) # + smart_unicode(self.PublicServant.psID)\n \n \n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""A simple script to create a motion plan."""
import os
import json
import logging
from logging.config import dictConfig
import argparse
import numpy as np
from opentrons_hardware.hardware_control.motion_planning import move_manager
from opentrons_hardware.hardware_control.motion_planning.types import (
AxisConstraints,
SystemConstraints,
MoveTarget,
vectorize,
Coordinates,
)
from typing import Dict, Any, List, cast
AXIS_NAMES = ["X", "Y", "Z", "A", "B", "C"]
log = logging.getLogger(__name__)
LOG_CONFIG: Dict[str, Any] = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"basic": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"}
},
"handlers": {
"stream_handler": {
"class": "logging.StreamHandler",
"formatter": "basic",
"level": logging.INFO,
},
},
"loggers": {
"": {
"handlers": ["stream_handler"],
"level": logging.DEBUG,
},
},
}
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(description="Motion planning script.")
parser.add_argument(
"--params-file-path",
"-p",
type=str,
required=False,
default=os.path.join(os.path.dirname(__file__) + "/motion_params.json"),
help="the parameter file path",
)
parser.add_argument(
"--debug",
"-d",
type=bool,
required=False,
default=False,
help="set logging level to debug",
)
parser.add_argument(
"--output",
"-o",
type=str,
required=False,
default=os.path.join(os.path.dirname(__file__) + "/motion_output.json"),
help="the output file path",
)
parser.add_argument(
"--blend-log",
"-b",
choices=["last", "all"],
required=False,
default="last",
help="output the last list or all of the blend log",
)
args = parser.parse_args()
if args.debug:
LOG_CONFIG["handlers"]["stream_handler"]["level"] = logging.DEBUG
LOG_CONFIG["loggers"][""]["level"] = logging.DEBUG
dictConfig(LOG_CONFIG)
with open(args.params_file_path, "r") as f:
params = json.load(f)
constraints: SystemConstraints[str] = {
axis: AxisConstraints.build(**params["constraints"][axis])
for axis in AXIS_NAMES
}
origin_from_file: List[float] = cast(List[float], params["origin"])
origin: Coordinates[str, np.float64] = dict(
zip(AXIS_NAMES, (np.float64(c) for c in origin_from_file))
)
target_list = [
MoveTarget.build(
dict(zip(AXIS_NAMES, target["coordinates"])), target["max_speed"]
)
for target in params["target_list"]
]
manager = move_manager.MoveManager(constraints=constraints)
_, blend_log = manager.plan_motion(
origin=origin,
target_list=target_list,
iteration_limit=params["iteration_limit"],
)
output = {
"moves": [v.to_dict() for v in blend_log[-1]],
"origin": list(vectorize(origin)),
}
def myconverter(obj: Any) -> Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return obj
with open(args.output, "w") as f:
json.dump(output, f, indent=2, default=myconverter)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "b7d75c2523dba0baaf06ba270045a4a344b8156c",
"index": 3023,
"step-1": "<mask token>\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, 
indent=2, default=myconverter)\n\n\n<mask token>\n",
"step-2": "<mask token>\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n 
target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nAXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']\nlog = logging.getLogger(__name__)\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = 
move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport os\nimport json\nimport logging\nfrom logging.config import dictConfig\nimport argparse\nimport numpy as np\nfrom opentrons_hardware.hardware_control.motion_planning import move_manager\nfrom opentrons_hardware.hardware_control.motion_planning.types import AxisConstraints, SystemConstraints, MoveTarget, vectorize, Coordinates\nfrom typing import Dict, Any, List, cast\nAXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']\nlog = logging.getLogger(__name__)\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in 
AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"A simple script to create a motion plan.\"\"\"\nimport os\nimport json\nimport logging\nfrom logging.config import dictConfig\nimport argparse\nimport numpy as np\n\nfrom opentrons_hardware.hardware_control.motion_planning import move_manager\nfrom opentrons_hardware.hardware_control.motion_planning.types import (\n AxisConstraints,\n SystemConstraints,\n MoveTarget,\n vectorize,\n Coordinates,\n)\nfrom typing import Dict, Any, List, cast\n\nAXIS_NAMES = [\"X\", \"Y\", \"Z\", \"A\", \"B\", \"C\"]\n\nlog = logging.getLogger(__name__)\n\nLOG_CONFIG: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"basic\": {\"format\": \"%(asctime)s %(name)s %(levelname)s %(message)s\"}\n },\n \"handlers\": {\n \"stream_handler\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"basic\",\n \"level\": logging.INFO,\n },\n },\n \"loggers\": {\n \"\": {\n \"handlers\": [\"stream_handler\"],\n \"level\": logging.DEBUG,\n },\n },\n}\n\n\ndef main() -> None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description=\"Motion planning script.\")\n parser.add_argument(\n \"--params-file-path\",\n \"-p\",\n type=str,\n required=False,\n default=os.path.join(os.path.dirname(__file__) + \"/motion_params.json\"),\n help=\"the parameter file path\",\n )\n parser.add_argument(\n \"--debug\",\n \"-d\",\n type=bool,\n required=False,\n default=False,\n help=\"set logging level to debug\",\n )\n parser.add_argument(\n \"--output\",\n \"-o\",\n type=str,\n required=False,\n default=os.path.join(os.path.dirname(__file__) + \"/motion_output.json\"),\n help=\"the output file path\",\n )\n parser.add_argument(\n \"--blend-log\",\n \"-b\",\n choices=[\"last\", \"all\"],\n required=False,\n default=\"last\",\n help=\"output the last list or all of the blend log\",\n )\n args = parser.parse_args()\n\n if args.debug:\n LOG_CONFIG[\"handlers\"][\"stream_handler\"][\"level\"] = logging.DEBUG\n 
LOG_CONFIG[\"loggers\"][\"\"][\"level\"] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n\n with open(args.params_file_path, \"r\") as f:\n params = json.load(f)\n\n constraints: SystemConstraints[str] = {\n axis: AxisConstraints.build(**params[\"constraints\"][axis])\n for axis in AXIS_NAMES\n }\n origin_from_file: List[float] = cast(List[float], params[\"origin\"])\n origin: Coordinates[str, np.float64] = dict(\n zip(AXIS_NAMES, (np.float64(c) for c in origin_from_file))\n )\n target_list = [\n MoveTarget.build(\n dict(zip(AXIS_NAMES, target[\"coordinates\"])), target[\"max_speed\"]\n )\n for target in params[\"target_list\"]\n ]\n\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(\n origin=origin,\n target_list=target_list,\n iteration_limit=params[\"iteration_limit\"],\n )\n\n output = {\n \"moves\": [v.to_dict() for v in blend_log[-1]],\n \"origin\": list(vectorize(origin)),\n }\n\n def myconverter(obj: Any) -> Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n\n with open(args.output, \"w\") as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#ERP PROJECT
import pyrebase
import smtplib
config = {
"apiKey": "apiKey",
"authDomain": "erproject-dd24e-default-rtdb.firebaseapp.com",
"databaseURL": "https://erproject-dd24e-default-rtdb.firebaseio.com",
"storageBucket": "erproject-dd24e-default-rtdb.appspot.com"
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
db.child("Student").push({"DAY":""})
db.child("Faculty").push({"DAY":""})
student=["s1","s2","s3","s4","s5","s6","s7","s8","s9","s10"]
faculty=["f1","f2","f3","f4","f5"]
st={}
data,data1='',''
st1={}
fa={}
fa1={}
i=1
import schedule
import time
def j():
global i
import pandas as pd
st1.update({i:st})
data=pd.DataFrame(st1)
print(data)
data.to_csv('student.csv')
fa1.update({i:fa})
data1=pd.DataFrame(fa1)
print(data1)
data1.to_csv('faculty.csv')
i=i+1
while(1):
schedule.every(10).seconds.do(j)
schedule.run_pending()
time.sleep(1)
f=input("enter 's' for student,enter 'f' for faculty")
f=f.upper()
if(f=="S"):
name=input("enter student name")
if name in student:
a=input("enter 'a' for absent,enter 'l' for leave,enter 'p' for present")
a=a.upper()
if(a=="L"): #please change sender and receiver's email id for this function to work
import smtplib
server =smtplib.SMTP("smtp.gmail.com",587)
server.starttls()
server.login("sender@gamil.com","akki@9510")
message=name+"is on leave"
server.sendmail("sender@gamil.com","receiver@gamil.com",message)
a="A"
st.update({name:a})
from datetime import datetime
now = datetime.now() # current date and time
date_time = now.strftime("%d-%m-%Y")
db.child("Student").child("DAY").child(date_time).update({name:a})
if(f=="F"):
name=input("enter faculty name")
if name in faculty:
a=input("enter 'a' for absent,enter 'l' for leave,enter 'p' for present")
a=a.upper()
if(a=="L"):
import smtplib
server =smtplib.SMTP("smtp.gmail.com",587)
server.starttls()
server.login("sender@gamil.com","akki@9510")
message=name+"is on leave"
server.sendmail("sender@gamil.com","receiver@gamil.com",message)
a="A"
fa.update({name:a})
from datetime import datetime
now = datetime.now() # current date and time
date_time = now.strftime("%d-%m-%Y")
db.child("Faculty").child("DAY").child(date_time).update({name:a})
|
normal
|
{
"blob_id": "3e7e6d7a0137d91dc7437ff91a39d7f8faad675e",
"index": 7075,
"step-1": "<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\n<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-3": "<mask token>\nconfig = {'apiKey': 'apiKey', 'authDomain':\n 'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':\n 'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':\n 'erproject-dd24e-default-rtdb.appspot.com'}\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\nstudent = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']\nfaculty = ['f1', 'f2', 'f3', 'f4', 'f5']\nst = {}\ndata, data1 = '', ''\nst1 = {}\nfa = {}\nfa1 = {}\ni = 1\n<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n 
server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-4": "import pyrebase\nimport smtplib\nconfig = {'apiKey': 'apiKey', 'authDomain':\n 'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':\n 'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':\n 'erproject-dd24e-default-rtdb.appspot.com'}\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\nstudent = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']\nfaculty = ['f1', 'f2', 'f3', 'f4', 'f5']\nst = {}\ndata, data1 = '', ''\nst1 = {}\nfa = {}\nfa1 = {}\ni = 1\nimport schedule\nimport time\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('sender@gamil.com', 'akki@9510')\n 
message = name + 'is on leave'\n server.sendmail('sender@gamil.com', 'receiver@gamil.com',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-5": "#ERP PROJECT\n\n\nimport pyrebase\nimport smtplib\n\nconfig = {\n \"apiKey\": \"apiKey\",\n \"authDomain\": \"erproject-dd24e-default-rtdb.firebaseapp.com\",\n \"databaseURL\": \"https://erproject-dd24e-default-rtdb.firebaseio.com\",\n \"storageBucket\": \"erproject-dd24e-default-rtdb.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\n\ndb.child(\"Student\").push({\"DAY\":\"\"})\ndb.child(\"Faculty\").push({\"DAY\":\"\"}) \nstudent=[\"s1\",\"s2\",\"s3\",\"s4\",\"s5\",\"s6\",\"s7\",\"s8\",\"s9\",\"s10\"]\nfaculty=[\"f1\",\"f2\",\"f3\",\"f4\",\"f5\"]\nst={}\ndata,data1='',''\nst1={}\nfa={}\nfa1={}\ni=1\nimport schedule\nimport time\ndef j():\n global i\n import pandas as pd\n st1.update({i:st})\n data=pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i:fa})\n data1=pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i=i+1 \nwhile(1):\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f=input(\"enter 's' for student,enter 'f' for faculty\")\n f=f.upper()\n if(f==\"S\"):\n name=input(\"enter student name\")\n if name in student:\n a=input(\"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\")\n a=a.upper()\n if(a==\"L\"): #please change sender and receiver's email id for this function to work \n import smtplib\n server =smtplib.SMTP(\"smtp.gmail.com\",587)\n server.starttls()\n server.login(\"sender@gamil.com\",\"akki@9510\")\n message=name+\"is on leave\"\n server.sendmail(\"sender@gamil.com\",\"receiver@gamil.com\",message)\n a=\"A\"\n st.update({name:a})\n from datetime import datetime\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y\")\n db.child(\"Student\").child(\"DAY\").child(date_time).update({name:a})\n \n if(f==\"F\"):\n name=input(\"enter faculty name\")\n if name in faculty:\n a=input(\"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\")\n a=a.upper()\n if(a==\"L\"):\n import 
smtplib\n server =smtplib.SMTP(\"smtp.gmail.com\",587)\n server.starttls()\n server.login(\"sender@gamil.com\",\"akki@9510\")\n message=name+\"is on leave\"\n server.sendmail(\"sender@gamil.com\",\"receiver@gamil.com\",message)\n a=\"A\"\n fa.update({name:a})\n from datetime import datetime\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y\")\n db.child(\"Faculty\").child(\"DAY\").child(date_time).update({name:a})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.init()
<|reserved_special_token_0|>
pygame.display.set_caption("Omar's Simulation")
screen.fill(background_colour)
<|reserved_special_token_0|>
for i in range(population_size):
robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))
<|reserved_special_token_0|>
if __name__ == '__main__':
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
screen.fill(background_colour)
pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20,
height - 20), 1)
pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)
for obstacle in obstacleArray:
obstacle.drawShape()
for robot in darwin.robot_array:
robot.move()
robot.update()
robot.collide()
robot.evaluate_fitness()
if darwin.check_if_all_dead():
darwin.get_stats()
darwin.make_next_generation()
pygame.display.update()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.init()
background_colour = 0, 0, 0
width, height = 1000, 600
target_location = 800, 300
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Omar's Simulation")
screen.fill(background_colour)
population_size = 50
elitism = 4
robots = []
for i in range(population_size):
robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))
darwin = Darwin(robot_array=robots, population_size=population_size,
elitism=4, mutation_rate=0.1)
if __name__ == '__main__':
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
screen.fill(background_colour)
pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20,
height - 20), 1)
pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)
for obstacle in obstacleArray:
obstacle.drawShape()
for robot in darwin.robot_array:
robot.move()
robot.update()
robot.collide()
robot.evaluate_fitness()
if darwin.check_if_all_dead():
darwin.get_stats()
darwin.make_next_generation()
pygame.display.update()
<|reserved_special_token_1|>
import pygame
from evolution import Darwin
from Sensor import Robot, obstacleArray
pygame.init()
background_colour = 0, 0, 0
width, height = 1000, 600
target_location = 800, 300
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Omar's Simulation")
screen.fill(background_colour)
population_size = 50
elitism = 4
robots = []
for i in range(population_size):
robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))
darwin = Darwin(robot_array=robots, population_size=population_size,
elitism=4, mutation_rate=0.1)
if __name__ == '__main__':
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
screen.fill(background_colour)
pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20,
height - 20), 1)
pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)
for obstacle in obstacleArray:
obstacle.drawShape()
for robot in darwin.robot_array:
robot.move()
robot.update()
robot.collide()
robot.evaluate_fitness()
if darwin.check_if_all_dead():
darwin.get_stats()
darwin.make_next_generation()
pygame.display.update()
<|reserved_special_token_1|>
import pygame
from evolution import Darwin
from Sensor import Robot, obstacleArray
# Game Settings
pygame.init()
background_colour = (0, 0, 0)
(width, height) = (1000, 600)
target_location = (800, 300)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Omar's Simulation")
screen.fill(background_colour)
# GA Hyper parameters
population_size = 50
elitism = 4
# Agent Initialisation
robots = []
for i in range(population_size):
robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))
darwin = Darwin(robot_array=robots, population_size=population_size, elitism=4, mutation_rate=0.1)
if __name__ == '__main__':
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
screen.fill(background_colour)
pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, height - 20), 1)
pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)
# pygame.draw.line(screen, (255, 0, 0), (800, 10), (800, 590))
for obstacle in obstacleArray:
obstacle.drawShape()
# obstacle.move_y()
# pygame.draw.circle(screen, (0, 0, 255), (500, 300), 100, 0)
# pygame.draw.circle(screen, (0, 255, 20), (200, 300), 75, 0)
# pygame.draw.polygon(screen, (255, 255, 255), new_list, 1)
# for pedestrian in all.start_pedestrians:
# pedestrian.move()
# pedestrian.update()
# all.introduce()
for robot in darwin.robot_array:
robot.move()
robot.update()
robot.collide()
robot.evaluate_fitness()
if darwin.check_if_all_dead():
darwin.get_stats()
darwin.make_next_generation()
pygame.display.update()
|
flexible
|
{
"blob_id": "cbcbc0d01c32693ebbdbcf285efdc8e521c447ee",
"index": 3998,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npygame.init()\n<mask token>\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\n<mask token>\nfor i in range(population_size):\n robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\n<mask token>\nif __name__ == '__main__':\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n screen.fill(background_colour)\n pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, \n height - 20), 1)\n pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n for obstacle in obstacleArray:\n obstacle.drawShape()\n for robot in darwin.robot_array:\n robot.move()\n robot.update()\n robot.collide()\n robot.evaluate_fitness()\n if darwin.check_if_all_dead():\n darwin.get_stats()\n darwin.make_next_generation()\n pygame.display.update()\n",
"step-3": "<mask token>\npygame.init()\nbackground_colour = 0, 0, 0\nwidth, height = 1000, 600\ntarget_location = 800, 300\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\npopulation_size = 50\nelitism = 4\nrobots = []\nfor i in range(population_size):\n robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\ndarwin = Darwin(robot_array=robots, population_size=population_size,\n elitism=4, mutation_rate=0.1)\nif __name__ == '__main__':\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n screen.fill(background_colour)\n pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, \n height - 20), 1)\n pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n for obstacle in obstacleArray:\n obstacle.drawShape()\n for robot in darwin.robot_array:\n robot.move()\n robot.update()\n robot.collide()\n robot.evaluate_fitness()\n if darwin.check_if_all_dead():\n darwin.get_stats()\n darwin.make_next_generation()\n pygame.display.update()\n",
"step-4": "import pygame\nfrom evolution import Darwin\nfrom Sensor import Robot, obstacleArray\npygame.init()\nbackground_colour = 0, 0, 0\nwidth, height = 1000, 600\ntarget_location = 800, 300\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\npopulation_size = 50\nelitism = 4\nrobots = []\nfor i in range(population_size):\n robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\ndarwin = Darwin(robot_array=robots, population_size=population_size,\n elitism=4, mutation_rate=0.1)\nif __name__ == '__main__':\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n screen.fill(background_colour)\n pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, \n height - 20), 1)\n pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n for obstacle in obstacleArray:\n obstacle.drawShape()\n for robot in darwin.robot_array:\n robot.move()\n robot.update()\n robot.collide()\n robot.evaluate_fitness()\n if darwin.check_if_all_dead():\n darwin.get_stats()\n darwin.make_next_generation()\n pygame.display.update()\n",
"step-5": "import pygame\nfrom evolution import Darwin\nfrom Sensor import Robot, obstacleArray\n\n\n# Game Settings\npygame.init()\nbackground_colour = (0, 0, 0)\n(width, height) = (1000, 600)\ntarget_location = (800, 300)\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\n\n\n# GA Hyper parameters\npopulation_size = 50\nelitism = 4\n\n# Agent Initialisation\nrobots = []\nfor i in range(population_size):\n\trobots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\ndarwin = Darwin(robot_array=robots, population_size=population_size, elitism=4, mutation_rate=0.1)\n\n\n\nif __name__ == '__main__':\n\trunning = True\n\twhile running:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trunning = False\n\t\tscreen.fill(background_colour)\n\t\tpygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, height - 20), 1)\n\t\tpygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n\t\t# pygame.draw.line(screen, (255, 0, 0), (800, 10), (800, 590))\n\t\tfor obstacle in obstacleArray:\n\t\t\tobstacle.drawShape()\n\t\t# obstacle.move_y()\n\t\t# pygame.draw.circle(screen, (0, 0, 255), (500, 300), 100, 0)\n\t\t# pygame.draw.circle(screen, (0, 255, 20), (200, 300), 75, 0)\n\t\t# pygame.draw.polygon(screen, (255, 255, 255), new_list, 1)\n\t\t# for pedestrian in all.start_pedestrians:\n\t\t# \t\tpedestrian.move()\n\t\t# \t\tpedestrian.update()\n\t\t# \t\tall.introduce()\n\t\tfor robot in darwin.robot_array:\n\t\t\trobot.move()\n\t\t\trobot.update()\n\t\t\trobot.collide()\n\t\t\trobot.evaluate_fitness()\n\t\tif darwin.check_if_all_dead():\n\t\t\tdarwin.get_stats()\n\t\t\tdarwin.make_next_generation()\n\t\tpygame.display.update()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def DFS(x):
if x > 7:
return
else:
DFS(x * 2)
print(x)
DFS(x * 2 + 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def DFS(x):
if x > 7:
return
else:
DFS(x * 2)
print(x)
DFS(x * 2 + 1)
if __name__ == '__main__':
DFS(1)
<|reserved_special_token_1|>
def DFS(x):
# 전위순회
if x > 7:
return
else:
DFS((x * 2))
print(x)
DFS((x*2)+1)
if __name__ == "__main__":
DFS(1)
|
flexible
|
{
"blob_id": "1cc8695aa694359314b6d478fe6abed29fdc6c91",
"index": 3309,
"step-1": "<mask token>\n",
"step-2": "def DFS(x):\n if x > 7:\n return\n else:\n DFS(x * 2)\n print(x)\n DFS(x * 2 + 1)\n\n\n<mask token>\n",
"step-3": "def DFS(x):\n if x > 7:\n return\n else:\n DFS(x * 2)\n print(x)\n DFS(x * 2 + 1)\n\n\nif __name__ == '__main__':\n DFS(1)\n",
"step-4": "\ndef DFS(x):\n # 전위순회\n if x > 7:\n return\n else:\n \n DFS((x * 2))\n print(x)\n DFS((x*2)+1)\n\n \nif __name__ == \"__main__\":\n DFS(1)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Descuento(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.codigo_descuento
class Venta(models.Model):
descripcion = models.CharField(max_length=100)
total_venta = models.IntegerField()
def __str__(self):
return self.total_venta
class Sucursal(models.Model):
direccion = models.CharField(max_length=100)
numero_sucursal = models.IntegerField()
def __str__(self):
return self.numero_sucursal
class Comuna(models.Model):
direccion = models.CharField(max_length=100)
numero_comuna = models.IntegerField()
numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)
def __(self):
return self.numero_sucursal
class Region(models.Model):
direccion = models.CharField(max_length=100)
numero_region = models.IntegerField()
numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)
def __str__(self):
return self.numero_region
class Pedido(models.Model):
numero_pedido = models.IntegerField()
fecha_pedido = models.DateField(max_length=100)
iva = models.IntegerField()
def __int__(self):
return self.numero_pedido
class Proveedores(models.Model):
nombre_proveedor = models.CharField(max_length=40)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)
def __str__(self):
return self.nombre_proveedor
class Suscripcion(models.Model):
fecha_suscripcion = models.DateField
valor_suscripcion = models.IntegerField()
suscrito = models.IntegerField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Usuario(models.Model):
mail = models.CharField(max_length=100)
contraseña = models.CharField(max_length=100)
rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)
def __str__(self):
return self.rut
class DetalleVenta(models.Model):
tipo_comprovante = models.CharField(max_length=100)
serie_comprovante = models.CharField(max_length=7)
fecha_comprovante = models.DateField(max_length=100)
iva = models.IntegerField()
total = models.IntegerField()
def __str__(self):
return self.serie_comprovante
class Descuento(models.Model):
codigo_descuento = models.CharField(max_length=7)
valor_descuento = models.IntegerField()
def __str__(self):
return self.codigo_descuento
class Venta(models.Model):
descripcion = models.CharField(max_length=100)
total_venta = models.IntegerField()
def __str__(self):
return self.total_venta
class Sucursal(models.Model):
direccion = models.CharField(max_length=100)
numero_sucursal = models.IntegerField()
def __str__(self):
return self.numero_sucursal
class Comuna(models.Model):
direccion = models.CharField(max_length=100)
numero_comuna = models.IntegerField()
numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)
def __(self):
return self.numero_sucursal
class Region(models.Model):
direccion = models.CharField(max_length=100)
numero_region = models.IntegerField()
numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)
def __str__(self):
return self.numero_region
class Pedido(models.Model):
numero_pedido = models.IntegerField()
fecha_pedido = models.DateField(max_length=100)
iva = models.IntegerField()
def __int__(self):
return self.numero_pedido
class Proveedores(models.Model):
nombre_proveedor = models.CharField(max_length=40)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)
def __str__(self):
return self.nombre_proveedor
class Suscripcion(models.Model):
fecha_suscripcion = models.DateField
valor_suscripcion = models.IntegerField()
suscrito = models.IntegerField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Producto(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Cliente(models.Model):
nombre = models.CharField(max_length=41)
paterno = models.CharField(max_length=40)
rut = models.CharField(max_length=9)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
def __str__(self):
return self.nombre
class Usuario(models.Model):
mail = models.CharField(max_length=100)
contraseña = models.CharField(max_length=100)
rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)
def __str__(self):
return self.rut
class DetalleVenta(models.Model):
tipo_comprovante = models.CharField(max_length=100)
serie_comprovante = models.CharField(max_length=7)
fecha_comprovante = models.DateField(max_length=100)
iva = models.IntegerField()
total = models.IntegerField()
def __str__(self):
return self.serie_comprovante
class Descuento(models.Model):
codigo_descuento = models.CharField(max_length=7)
valor_descuento = models.IntegerField()
def __str__(self):
return self.codigo_descuento
class Venta(models.Model):
descripcion = models.CharField(max_length=100)
total_venta = models.IntegerField()
def __str__(self):
return self.total_venta
class Sucursal(models.Model):
direccion = models.CharField(max_length=100)
numero_sucursal = models.IntegerField()
def __str__(self):
return self.numero_sucursal
class Comuna(models.Model):
direccion = models.CharField(max_length=100)
numero_comuna = models.IntegerField()
numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)
def __(self):
return self.numero_sucursal
class Region(models.Model):
direccion = models.CharField(max_length=100)
numero_region = models.IntegerField()
numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)
def __str__(self):
return self.numero_region
class Pedido(models.Model):
numero_pedido = models.IntegerField()
fecha_pedido = models.DateField(max_length=100)
iva = models.IntegerField()
def __int__(self):
return self.numero_pedido
class Proveedores(models.Model):
nombre_proveedor = models.CharField(max_length=40)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)
def __str__(self):
return self.nombre_proveedor
class Suscripcion(models.Model):
fecha_suscripcion = models.DateField
valor_suscripcion = models.IntegerField()
suscrito = models.IntegerField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Producto(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.nombre
class Cliente(models.Model):
nombre = models.CharField(max_length=41)
paterno = models.CharField(max_length=40)
rut = models.CharField(max_length=9)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
def __str__(self):
return self.nombre
class Usuario(models.Model):
mail = models.CharField(max_length=100)
contraseña = models.CharField(max_length=100)
rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)
def __str__(self):
return self.rut
class DetalleVenta(models.Model):
tipo_comprovante = models.CharField(max_length=100)
serie_comprovante = models.CharField(max_length=7)
fecha_comprovante = models.DateField(max_length=100)
iva = models.IntegerField()
total = models.IntegerField()
def __str__(self):
return self.serie_comprovante
class Descuento(models.Model):
codigo_descuento = models.CharField(max_length=7)
valor_descuento = models.IntegerField()
def __str__(self):
return self.codigo_descuento
class Venta(models.Model):
descripcion = models.CharField(max_length=100)
total_venta = models.IntegerField()
def __str__(self):
return self.total_venta
class Sucursal(models.Model):
direccion = models.CharField(max_length=100)
numero_sucursal = models.IntegerField()
def __str__(self):
return self.numero_sucursal
class Comuna(models.Model):
direccion = models.CharField(max_length=100)
numero_comuna = models.IntegerField()
numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)
def __(self):
return self.numero_sucursal
class Region(models.Model):
direccion = models.CharField(max_length=100)
numero_region = models.IntegerField()
numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)
def __str__(self):
return self.numero_region
class Pedido(models.Model):
numero_pedido = models.IntegerField()
fecha_pedido = models.DateField(max_length=100)
iva = models.IntegerField()
def __int__(self):
return self.numero_pedido
class Proveedores(models.Model):
nombre_proveedor = models.CharField(max_length=40)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)
def __str__(self):
return self.nombre_proveedor
class Suscripcion(models.Model):
fecha_suscripcion = models.DateField
valor_suscripcion = models.IntegerField()
suscrito = models.IntegerField()
<|reserved_special_token_1|>
from django.db import models
from django.db.models.base import Model
# Create your models here.
class Categoria(models.Model):
categoria = models.CharField(max_length=40)
def __str__(self):
return self.categoria
class Producto(models.Model):
codigo = models.CharField(max_length=40)
nombre = models.CharField(max_length=40)
precio = models.IntegerField()
stock = models.IntegerField()
descripcion = models.CharField(max_length=40)
categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)
fecha = models.DateField()
imagen = models.ImageField(null=True, blank=True)
def __str__(self):
return self.nombre
class Cliente(models.Model):
nombre = models.CharField(max_length=41)
paterno = models.CharField(max_length=40)
rut = models.CharField(max_length=9)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
def __str__(self):
return self.nombre
class Usuario(models.Model):
mail = models.CharField(max_length=100)
contraseña = models.CharField(max_length=100)
rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)
def __str__(self):
return self.rut
class DetalleVenta(models.Model):
tipo_comprovante = models.CharField(max_length=100)
serie_comprovante = models.CharField(max_length=7)
fecha_comprovante = models.DateField(max_length=100)
iva = models.IntegerField()
total = models.IntegerField()
def __str__(self):
return self.serie_comprovante
class Descuento(models.Model):
codigo_descuento = models.CharField(max_length=7)
valor_descuento = models.IntegerField()
def __str__(self):
return self.codigo_descuento
class Venta(models.Model):
descripcion = models.CharField(max_length=100)
total_venta = models.IntegerField()
def __str__(self):
return self.total_venta
class Sucursal(models.Model):
direccion = models.CharField(max_length=100)
numero_sucursal = models.IntegerField()
def __str__(self):
return self.numero_sucursal
class Comuna(models.Model):
direccion = models.CharField(max_length=100)
numero_comuna = models.IntegerField()
numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)
def __ (self):
return self.numero_sucursal
class Region(models.Model):
direccion = models.CharField(max_length=100)
numero_region = models.IntegerField()
numero_comuna= models.ForeignKey(Comuna,on_delete=models.DO_NOTHING)
def __str__(self):
return self.numero_region
class Pedido(models.Model):
numero_pedido = models.IntegerField()
fecha_pedido = models.DateField(max_length=100)
iva = models.IntegerField()
def __int__(self):
return self.numero_pedido
class Proveedores(models.Model):
nombre_proveedor = models.CharField(max_length=40)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)
def __str__(self):
return self.nombre_proveedor
class Suscripcion(models.Model):
fecha_suscripcion = models.DateField
valor_suscripcion = models.IntegerField()
suscrito = models.IntegerField()
|
flexible
|
{
"blob_id": "0e19d7251db3382c34ad2d38a7984b65325ecfbf",
"index": 7584,
"step-1": "<mask token>\n\n\nclass Descuento(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-2": "<mask token>\n\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n\n\nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = 
models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-3": "<mask token>\n\n\nclass Producto(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Cliente(models.Model):\n nombre = models.CharField(max_length=41)\n paterno = models.CharField(max_length=40)\n rut = models.CharField(max_length=9)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n\n def __str__(self):\n return self.nombre\n\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n\n\nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna = 
models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-4": "<mask token>\n\n\nclass Producto(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.nombre\n\n\nclass Cliente(models.Model):\n nombre = models.CharField(max_length=41)\n paterno = models.CharField(max_length=40)\n rut = models.CharField(max_length=9)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n\n def __str__(self):\n return self.nombre\n\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n\n\nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n 
numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-5": "from django.db import models\nfrom django.db.models.base import Model\n\n# Create your models here.\nclass Categoria(models.Model):\n categoria = models.CharField(max_length=40)\n def __str__(self):\n return self.categoria\n\nclass Producto(models.Model):\n codigo = models.CharField(max_length=40)\n nombre = models.CharField(max_length=40)\n precio = models.IntegerField()\n stock = models.IntegerField()\n descripcion = models.CharField(max_length=40)\n categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)\n fecha = models.DateField()\n imagen = models.ImageField(null=True, blank=True)\n\n\n\n def __str__(self):\n return self.nombre\n\nclass Cliente(models.Model):\n nombre = models.CharField(max_length=41)\n paterno = models.CharField(max_length=40)\n rut = models.CharField(max_length=9)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n\n def __str__(self):\n return self.nombre\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n \nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = 
models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n \n def __ (self):\n return self.numero_sucursal\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna= models.ForeignKey(Comuna,on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n \n def __str__(self):\n return self.nombre_proveedor\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()",
"step-ids": [
22,
29,
33,
34,
40
]
}
|
[
22,
29,
33,
34,
40
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
img = np.array([[1, 2], [1, 3], [1, 4]])
print(img.tolist())
sys.stdout.flush()
<|reserved_special_token_1|>
import numpy as np
import sys
import os
import cv2
if __name__ == '__main__':
img = np.array([[1, 2], [1, 3], [1, 4]])
print(img.tolist())
sys.stdout.flush()
<|reserved_special_token_1|>
import numpy as np
import sys
import os
import cv2
if __name__ == "__main__":
# print(sys.argv[1])
# img = cv2.imread(sys.argv[1], 0)
# cv2.imshow('img', img)
# cv2.waitKey(0)
img = np.array([[1, 2], [1, 3], [1, 4]])
print(img.tolist())
sys.stdout.flush()
|
flexible
|
{
"blob_id": "54833c19d68bb7a1817639ef761367ce75a3a46f",
"index": 9200,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n img = np.array([[1, 2], [1, 3], [1, 4]])\n print(img.tolist())\n sys.stdout.flush()\n",
"step-3": "import numpy as np\nimport sys\nimport os\nimport cv2\nif __name__ == '__main__':\n img = np.array([[1, 2], [1, 3], [1, 4]])\n print(img.tolist())\n sys.stdout.flush()\n",
"step-4": "import numpy as np\nimport sys\nimport os\nimport cv2\n\n\nif __name__ == \"__main__\":\n \n # print(sys.argv[1])\n # img = cv2.imread(sys.argv[1], 0)\n # cv2.imshow('img', img)\n # cv2.waitKey(0)\n img = np.array([[1, 2], [1, 3], [1, 4]])\n print(img.tolist())\n sys.stdout.flush()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def maximalSquare(self, matrix: List[List[str]]) ->int:
if not matrix:
return 0
dp = [0] * (len(matrix[0]) + 1)
longestSide = 0
for i in range(len(matrix)):
prevSquare = 0
for j in range(len(matrix[0])):
temp = dp[j]
if matrix[i][j] == '1':
dp[j] = 1 + min(dp[j], dp[j - 1], prevSquare)
longestSide = max(longestSide, dp[j])
else:
dp[j] = 0
prevSquare = temp
return longestSide * longestSide
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
Given a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.
Example:
Input:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
Output: 4
"""
# 196ms. 98 percentile
class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
if not matrix:
return 0
dp = [0]*(len(matrix[0]) + 1)
longestSide = 0
for i in range(len(matrix)):
prevSquare =0
for j in range(len(matrix[0])):
temp = dp[j]
if matrix[i][j] == '1':
dp[j] = 1 + min(dp[j], dp[j-1], prevSquare)
longestSide = max(longestSide, dp[j])
else:
dp[j] = 0
prevSquare = temp
return longestSide*longestSide
"""
Notes:
Two hard things in this problem. The first is the logic for the dp, although after the fact
it seems pretty straightforward imo.
At any element you can check if you have a 2 by 2 square by looking at its neighbors. So anywhere you see
1 1
1 1
you're going to replace the bottom right corner with a 2. Note we're going top down and left to right...
So if you see
2 2
2 1
...then you know that you actually have
1 1 1
1 2 2
1 2 1
meaning you can actually put 3 in the corner.
On the other hand, if any of the neighbors are 1's, then you won't have the full cube. Implying that
at each spot, if it's a 1, you take the min of the three neighbors + 1.
The second hard thing is just dealing with the fact the input is characters not ints...annoying imo. The second
solution up there just uses a standard 1d dp array to keep track of the last row processed in terms of ints...which
is all we need. So we can avoid casting anything.
The first solution only casts the first first row and the first column.
Most of it is straightforwards. The one thing I want to note is that temp variable switch. Basically because our dp is
a single array, when we're processing element i, we've already replaced element i-1 with an updated value. That's a problem
because the i-1 value represents the j-1,i-1 value for the ith element in the dp. So we use that little temp switch to
skirt the issue.
"""
|
flexible
|
{
"blob_id": "e5d31a2ea4a8615d24626be2414f5ae49b9cd6a1",
"index": 184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def maximalSquare(self, matrix: List[List[str]]) ->int:\n if not matrix:\n return 0\n dp = [0] * (len(matrix[0]) + 1)\n longestSide = 0\n for i in range(len(matrix)):\n prevSquare = 0\n for j in range(len(matrix[0])):\n temp = dp[j]\n if matrix[i][j] == '1':\n dp[j] = 1 + min(dp[j], dp[j - 1], prevSquare)\n longestSide = max(longestSide, dp[j])\n else:\n dp[j] = 0\n prevSquare = temp\n return longestSide * longestSide\n\n\n<mask token>\n",
"step-4": "\"\"\"\nGiven a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.\n\nExample:\n\nInput: \n\n1 0 1 0 0\n1 0 1 1 1\n1 1 1 1 1\n1 0 0 1 0\n\nOutput: 4\n\"\"\"\n# 196ms. 98 percentile\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n if not matrix:\n return 0\n dp = [0]*(len(matrix[0]) + 1)\n longestSide = 0\n for i in range(len(matrix)):\n prevSquare =0\n for j in range(len(matrix[0])):\n temp = dp[j]\n if matrix[i][j] == '1':\n dp[j] = 1 + min(dp[j], dp[j-1], prevSquare)\n longestSide = max(longestSide, dp[j])\n else:\n dp[j] = 0\n \n prevSquare = temp\n \n return longestSide*longestSide\n\n\n\"\"\"\nNotes:\n\nTwo hard things in this problem. The first is the logic for the dp, although after the fact \nit seems pretty straightforward imo.\nAt any element you can check if you have a 2 by 2 square by looking at its neighbors. So anywhere you see\n1 1\n1 1\nyou're going to replace the bottom right corner with a 2. Note we're going top down and left to right...\nSo if you see\n2 2\n2 1 \n...then you know that you actually have\n1 1 1\n1 2 2\n1 2 1\nmeaning you can actually put 3 in the corner. \n\nOn the other hand, if any of the neighbors are 1's, then you won't have the full cube. Implying that\nat each spot, if it's a 1, you take the min of the three neighbors + 1. \n\n\nThe second hard thing is just dealing with the fact the input is characters not ints...annoying imo. The second\nsolution up there just uses a standard 1d dp array to keep track of the last row processed in terms of ints...which\nis all we need. So we can avoid casting anything. \n\nThe first solution only casts the first first row and the first column. \n\nMost of it is straightforwards. The one thing I want to note is that temp variable switch. Basically because our dp is\na single array, when we're processing element i, we've already replaced element i-1 with an updated value. 
That's a problem\nbecause the i-1 value represents the j-1,i-1 value for the ith element in the dp. So we use that little temp switch to \nskirt the issue. \n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# %% import libs
import os
import argparse
import logging as logger
import mxnet as mx
import tqdm
from mxnet import autograd
from mxnet import gluon
from gluoncv.utils import makedirs
import datasets as gan_datasets
from utils import vis, get_cpus, TrainingHistory
import models
# Fix the global MXNet RNG seed so weight init and noise draws are reproducible.
mx.random.seed(5)
# NOTE(review): logging.basicConfig with a filename fails if the 'logs/'
# directory does not exist yet -- confirm it is created before this runs.
logger.basicConfig(level=logger.INFO, filename='logs/train_loss-dcgan.log')
# Command-line interface for the DCGAN training run.
# NOTE(review): argparse `type=bool` is a known pitfall -- bool("False") is
# truthy, so e.g. `--cuda False` still parses as True. store_true/store_false
# actions would be the conventional fix, but that changes the CLI surface.
arg = argparse.ArgumentParser(description="training parameters")
arg.add_argument('--lr', type=float, default=0.001, help='learning rate')
arg.add_argument('--batch', type=int, default=32, help='batch size')
arg.add_argument('--epoch', type=int, default=30000, help='training epochs')
arg.add_argument('--continue', type=bool, default=True, help='should continue with last checkpoint')
arg.add_argument('--save_checkpoint', type=bool, default=True, help='whether save checkpoint')
arg.add_argument('--save_per_epoch', type=int, default=250, help='save checkpoint every specific epochs')
arg.add_argument('--save_dir', type=str, default='saved/params-dcgan', help='check point save path')
arg.add_argument('--cuda', type=bool, default=False, help='whether use gpu, default is True')
# NOTE(review): the parsed attribute for this flag is `pred_per_gen`; any
# downstream read must use exactly that name.
arg.add_argument('--pred_per_gen', type=int, default=15, help='make a pred every specific epoch')
arg.add_argument('--validation', type=bool, default=False, help='whether use validation set, default: False')
arg.add_argument('--dataset', type=str, default='rem_face', help='rem, miku, face,rem_face')
opt = arg.parse_args()
# %% define parameters
# Unpack the parsed CLI namespace into module-level config names.
epoch = opt.epoch
epoch_start = 0  # overwritten below when resuming from a checkpoint
batch_size = opt.batch
lr = opt.lr
should_save_checkpoint = opt.save_checkpoint
save_per_epoch = opt.save_per_epoch
save_dir = opt.save_dir
# BUG FIX: the flag is declared as `--pred_per_gen`, so the parsed namespace
# has no `pred_per_epoch` attribute; the old read raised AttributeError at
# startup. Read the attribute that actually exists (variable name kept so the
# rest of the script is unaffected).
pred_per_epoch = opt.pred_per_gen
should_use_val = opt.validation
dataset = opt.dataset
# Resolve the dataset loader function by name, e.g. load_rem_face().
dataset_loader = getattr(gan_datasets, 'load_{}'.format(dataset))
CTX = mx.gpu() if opt.cuda else mx.cpu()
logger.info('Will use {}'.format(CTX))
# %% define dataloader
logger.info("Prepare data")
# Training pipeline with light augmentation; Normalize with mean=std=0.5 maps
# pixels from [0, 1] into [-1, 1] (the usual range for a tanh-output DCGAN).
# noinspection PyTypeChecker
tfs_train = gluon.data.vision.transforms.Compose([
    gluon.data.vision.transforms.Resize(size=(256, 256), interpolation=2),
    gluon.data.vision.transforms.RandomFlipLeftRight(),
    gluon.data.vision.transforms.RandomSaturation(0.005),
    gluon.data.vision.transforms.ToTensor(),
    gluon.data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# Validation pipeline: same resize/normalize, no augmentation.
# noinspection PyTypeChecker
tfs_val = gluon.data.vision.transforms.Compose([
    gluon.data.vision.transforms.Resize(size=(256, 256), interpolation=2),
    gluon.data.vision.transforms.ToTensor(),
    gluon.data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# dataset_loader returns (train_set, val_set); val_set may be falsy, in which
# case val_loader is never defined (so --validation must not be enabled then).
train_set, val_set = dataset_loader()
train_loader = gluon.data.DataLoader(train_set.transform_first(tfs_train),
                                     batch_size=batch_size, shuffle=True,
                                     last_batch='rollover', num_workers=get_cpus(), pin_memory=True)
if val_set:
    val_loader = gluon.data.DataLoader(val_set.transform_first(tfs_val),
                                       batch_size=batch_size, shuffle=False,
                                       last_batch='rollover', num_workers=get_cpus(), pin_memory=True)
# %% define models
generator = models.make_gen('v4')
discriminator = models.make_dis()
# DCGAN-style initialization: weights drawn from N(0, 0.02).
generator.initialize(init=mx.init.Normal(0.02), ctx=CTX)
discriminator.initialize(init=mx.init.Normal(0.02), ctx=CTX)
# 'continue' is a Python keyword, so the parsed flag must be read via getattr.
if getattr(opt, 'continue'):
    import utils
    makedirs(save_dir)
    # Resume both networks from checkpoints under save_dir; per the log line
    # below, the returned value is the epoch to resume from.
    epoch_start = utils.load_model_from_params(generator, discriminator, save_dir)
    logger.info('Continue training at {}, and rest epochs {}'.format(epoch_start, epoch - epoch_start))
# Compile the imperative graphs into symbolic ones for faster execution.
generator.hybridize()
discriminator.hybridize()
# %% prepare training
logger.info("Prepare training")
# Per-epoch loss curves; validation columns only exist when a val set is used.
if should_use_val:
    history_labels = ['gloss', 'gval_loss', 'dloss', 'dval_loss']
else:
    history_labels = ['gloss', 'dloss']
history = TrainingHistory(labels=history_labels)
# The discriminator emits raw logits, hence from_sigmoid=False.
loss = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
# Adam with beta1=0.5 -- the customary DCGAN optimizer setting.
trainer_gen = gluon.Trainer(generator.collect_params(), optimizer='adam', optimizer_params={
    'learning_rate': lr,
    'beta1': 0.5
})
trainer_dis = gluon.Trainer(discriminator.collect_params(), optimizer='adam', optimizer_params={
    'learning_rate': lr,
    'beta1': 0.5
})
# Fixed real/fake targets sized for a full batch.
# NOTE(review): this relies on last_batch='rollover' in the loaders so that
# no partial batches ever appear -- confirm, otherwise shapes mismatch.
true_label = mx.nd.ones((batch_size,), ctx=CTX)
fake_label = mx.nd.zeros((batch_size,), ctx=CTX)
def make_noises(bs):
    """Sample `bs` latent vectors from N(0, 1), shaped (bs, 512, 1, 1)."""
    flat = mx.nd.random_normal(0, 1, shape=(bs, 512), ctx=CTX, dtype='float32')
    return flat.reshape((bs, 512, 1, 1))


# One fixed latent vector, persisted to disk so the same sample can be
# regenerated later to visualize training progress over time.
pred_noise = make_noises(1)
mx.nd.save('pred_noise', pred_noise)
def validation(g, d, val_loader):
    """Return (mean generator loss, mean discriminator loss) over val_loader.

    Runs entirely in predict mode: no gradients, no parameter updates.
    """
    gen_total = 0.0
    dis_total = 0.0
    n_batches = 0
    progress = tqdm.tqdm(
        val_loader,
        desc="Validating",
        leave=False,
        unit='batch',
        unit_scale=True,
        mininterval=1,
        maxinterval=5,
        dynamic_ncols=True)
    for batch, _ in progress:
        n_batches += 1
        noise = make_noises(len(batch))
        batch = batch.as_in_context(CTX)
        with autograd.predict_mode():
            # discriminator loss: real term + fake term
            real_err = loss(d(batch), true_label)
            fake_err = loss(d(g(noise)), fake_label)
            dis_total += (real_err + fake_err).mean().asscalar()
            # generator loss: fake images scored against the "real" target
            gen_total += loss(d(g(noise)), true_label).mean().asscalar()
    return gen_total / n_batches, dis_total / n_batches
# %% begin training
# *_iter_times: per-reporting-window accumulation counters (reset each epoch);
# *_update_times: monotonically growing optimizer-step counters.
d_iter_times = 0
g_iter_times = 0
d_update_times = 0
g_update_times = 0
g_train_loss = 0.0
d_train_loss = 0.0
logger.info("Begin training")
for ep in tqdm.tqdm(range(epoch_start, epoch + 1),
                    total=epoch,
                    desc="Total Progress",
                    leave=False,
                    initial=epoch_start,
                    unit='epoch',
                    unit_scale=True,
                    mininterval=10,
                    maxinterval=100,
                    dynamic_ncols=True):
    for data, _ in tqdm.tqdm(
            train_loader,
            desc="Epoch {}".format(ep),
            leave=False,
            unit='batch',
            unit_scale=True,
            mininterval=1,
            maxinterval=5,
            dynamic_ncols=True):
        bs = len(data)
        noise = make_noises(bs)
        data = data.as_in_context(CTX)
        # --- discriminator update (every batch) ---
        with autograd.record():
            d_iter_times += 1
            d_update_times += 1
            # real images should score as real
            out = discriminator(data)
            err2real = loss(out, true_label)
            # fake images should score as fake; detach so no gradient
            # flows back into the generator during this pass
            with autograd.predict_mode():
                fake_img = generator(noise)
            out = discriminator(fake_img.detach())
            err2fake = loss(out, fake_label)
            err4dis = err2real + err2fake
        err4dis.backward()
        trainer_dis.step(bs)
        d_train_loss += err4dis.mean().asscalar()

        # --- generator update (every 5th discriminator update) ---
        if d_iter_times % 5 == 0:
            g_iter_times += 1
            g_update_times += 1
            with autograd.record():
                fake_img = generator(noise)
                with autograd.predict_mode():
                    out = discriminator(fake_img)
                err4gen = loss(out, true_label)
            err4gen.backward()
            trainer_gen.step(bs)
            g_train_loss += err4gen.mean().asscalar()

    # Average each accumulated loss by the number of times it was actually
    # accumulated. BUG FIX: the divisors were swapped before (g by
    # d_iter_times, d by g_iter_times), which under-reported the generator
    # loss ~5x, over-reported the discriminator loss, and raised
    # ZeroDivisionError whenever an epoch produced no generator update
    # (fewer than 5 batches). Guards keep empty windows at 0.0.
    if g_iter_times:
        g_train_loss /= g_iter_times
    if d_iter_times:
        d_train_loss /= d_iter_times

    # use validation set or not
    if should_use_val:
        g_val_loss, d_val_loss = validation(generator, discriminator, val_loader)
        history.update([g_train_loss, g_val_loss, d_train_loss, d_val_loss])
        logger.info("Generator[train: {}, val: {}]".format(g_train_loss, g_val_loss))
        logger.info("Discriminator[train: {}, val: {}]".format(d_train_loss, d_val_loss))
    else:
        history.update([g_train_loss, d_train_loss])
        logger.info("Generator[{}], Discriminator[{}]".format(g_train_loss, d_train_loss))

    # reset the per-epoch reporting window
    g_train_loss = 0.0
    d_train_loss = 0.0
    d_iter_times = 0
    g_iter_times = 0

    # Emit sample images every `pred_per_epoch` *generator updates* -- the
    # cadence counter is g_update_times, not the epoch index, despite the
    # variable name.
    if g_update_times % pred_per_epoch == 0:
        fake = generator(make_noises(1))[0]
        unique_fake = generator(pred_noise)[0]
        pred_path = 'logs/pred-dcgan'
        pred_unique_path = os.path.join(pred_path, 'unique')
        makedirs(pred_path)
        makedirs(pred_unique_path)
        # CHW -> HWC for plotting
        vis.show_img(fake.transpose((1, 2, 0)), save_path=pred_path)
        vis.show_img(unique_fake.transpose((1, 2, 0)), save_path=pred_unique_path)

    # save history plot every epoch
    history.plot(save_path='logs/histories-dcgan')
    # save checkpoint
    if should_save_checkpoint:
        if ep % save_per_epoch == 0:
            generator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.format(ep)))
            discriminator.save_parameters(os.path.join(save_dir, 'discriminator_{:04d}.params'.format(ep)))

# Final snapshot after training completes.
history.plot(save_path='logs/histories-dcgan')
generator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.format(ep)))
|
normal
|
{
"blob_id": "c14d76493cd3dacc55c993f588dec555b7a4a13c",
"index": 4192,
"step-1": "<mask token>\n\n\ndef make_noises(bs):\n return mx.nd.random_normal(0, 1, shape=(bs, 512), ctx=CTX, dtype='float32'\n ).reshape((bs, 512, 1, 1))\n\n\n<mask token>\n",
"step-2": "<mask token>\nmx.random.seed(5)\nlogger.basicConfig(level=logger.INFO, filename='logs/train_loss-dcgan.log')\n<mask token>\narg.add_argument('--lr', type=float, default=0.001, help='learning rate')\narg.add_argument('--batch', type=int, default=32, help='batch size')\narg.add_argument('--epoch', type=int, default=30000, help='training epochs')\narg.add_argument('--continue', type=bool, default=True, help=\n 'should continue with last checkpoint')\narg.add_argument('--save_checkpoint', type=bool, default=True, help=\n 'whether save checkpoint')\narg.add_argument('--save_per_epoch', type=int, default=250, help=\n 'save checkpoint every specific epochs')\narg.add_argument('--save_dir', type=str, default='saved/params-dcgan', help\n ='check point save path')\narg.add_argument('--cuda', type=bool, default=False, help=\n 'whether use gpu, default is True')\narg.add_argument('--pred_per_gen', type=int, default=15, help=\n 'make a pred every specific epoch')\narg.add_argument('--validation', type=bool, default=False, help=\n 'whether use validation set, default: False')\narg.add_argument('--dataset', type=str, default='rem_face', help=\n 'rem, miku, face,rem_face')\n<mask token>\nlogger.info('Will use {}'.format(CTX))\nlogger.info('Prepare data')\n<mask token>\nif val_set:\n val_loader = gluon.data.DataLoader(val_set.transform_first(tfs_val),\n batch_size=batch_size, shuffle=False, last_batch='rollover',\n num_workers=get_cpus(), pin_memory=True)\n<mask token>\ngenerator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\ndiscriminator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\nif getattr(opt, 'continue'):\n import utils\n makedirs(save_dir)\n epoch_start = utils.load_model_from_params(generator, discriminator,\n save_dir)\n logger.info('Continue training at {}, and rest epochs {}'.format(\n epoch_start, epoch - epoch_start))\ngenerator.hybridize()\ndiscriminator.hybridize()\nlogger.info('Prepare training')\nif should_use_val:\n history_labels = ['gloss', 
'gval_loss', 'dloss', 'dval_loss']\nelse:\n history_labels = ['gloss', 'dloss']\n<mask token>\n\n\ndef make_noises(bs):\n return mx.nd.random_normal(0, 1, shape=(bs, 512), ctx=CTX, dtype='float32'\n ).reshape((bs, 512, 1, 1))\n\n\n<mask token>\nmx.nd.save('pred_noise', pred_noise)\n\n\ndef validation(g, d, val_loader):\n g_val_loss = 0.0\n d_val_loss = 0.0\n iter_times = 0\n for data, _ in tqdm.tqdm(val_loader, desc='Validating', leave=False,\n unit='batch', unit_scale=True, mininterval=1, maxinterval=5,\n dynamic_ncols=True):\n iter_times += 1\n bs = len(data)\n nosise = make_noises(bs)\n data = data.as_in_context(CTX)\n with autograd.predict_mode():\n out = d(data)\n err2real = loss(out, true_label)\n fake_img = g(nosise)\n out = d(fake_img)\n err2fake = loss(out, fake_label)\n err4dis = err2real + err2fake\n d_val_loss += err4dis.mean().asscalar()\n fake_img = g(nosise)\n out = d(fake_img)\n err4gen = loss(out, true_label)\n g_val_loss += err4gen.mean().asscalar()\n return g_val_loss / iter_times, d_val_loss / iter_times\n\n\n<mask token>\nlogger.info('Begin training')\nfor ep in tqdm.tqdm(range(epoch_start, epoch + 1), total=epoch, desc=\n 'Total Progress', leave=False, initial=epoch_start, unit='epoch',\n unit_scale=True, mininterval=10, maxinterval=100, dynamic_ncols=True):\n for data, _ in tqdm.tqdm(train_loader, desc='Epoch {}'.format(ep),\n leave=False, unit='batch', unit_scale=True, mininterval=1,\n maxinterval=5, dynamic_ncols=True):\n bs = len(data)\n nosise = make_noises(bs)\n data = data.as_in_context(CTX)\n with autograd.record():\n d_iter_times += 1\n d_update_times += 1\n out = discriminator(data)\n err2real = loss(out, true_label)\n with autograd.predict_mode():\n fake_img = generator(nosise)\n out = discriminator(fake_img.detach())\n err2fake = loss(out, fake_label)\n err4dis = err2real + err2fake\n err4dis.backward()\n trainer_dis.step(bs)\n d_train_loss += err4dis.mean().asscalar()\n if d_iter_times % 5 == 0:\n g_iter_times += 1\n 
g_update_times += 1\n with autograd.record():\n fake_img = generator(nosise)\n with autograd.predict_mode():\n out = discriminator(fake_img)\n err4gen = loss(out, true_label)\n err4gen.backward()\n trainer_gen.step(bs)\n g_train_loss += err4gen.mean().asscalar()\n g_train_loss /= d_iter_times\n d_train_loss /= g_iter_times\n if should_use_val:\n g_val_loss, d_val_loss = validation(generator,\n discriminator, val_loader)\n history.update([g_train_loss, g_val_loss, d_train_loss,\n d_val_loss])\n logger.info('Generator[train: {}, val: {}]'.format(\n g_train_loss, g_val_loss))\n logger.info('Discriminator[train: {}, val: {}]'.format(\n d_train_loss, d_val_loss))\n else:\n history.update([g_train_loss, d_train_loss])\n logger.info('Generator[{}], Discriminator[{}]'.format(\n g_train_loss, d_train_loss))\n g_train_loss = 0.0\n d_train_loss = 0.0\n d_iter_times = 0\n g_iter_times = 0\n if g_update_times % pred_per_epoch == 0:\n fake = generator(make_noises(1))[0]\n unique_fake = generator(pred_noise)[0]\n pred_path = 'logs/pred-dcgan'\n pred_unique_path = os.path.join(pred_path, 'unique')\n makedirs(pred_path)\n makedirs(pred_unique_path)\n vis.show_img(fake.transpose((1, 2, 0)), save_path=pred_path)\n vis.show_img(unique_fake.transpose((1, 2, 0)), save_path=\n pred_unique_path)\n history.plot(save_path='logs/histories-dcgan')\n if should_save_checkpoint:\n if ep % save_per_epoch == 0:\n generator.save_parameters(os.path.join(save_dir,\n 'generator_{:04d}.params'.format(ep)))\n discriminator.save_parameters(os.path.join(save_dir,\n 'discriminator_{:04d}.params'.format(ep)))\nhistory.plot(save_path='logs/histories-dcgan')\ngenerator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.\n format(ep)))\n",
"step-3": "<mask token>\nmx.random.seed(5)\nlogger.basicConfig(level=logger.INFO, filename='logs/train_loss-dcgan.log')\narg = argparse.ArgumentParser(description='training parameters')\narg.add_argument('--lr', type=float, default=0.001, help='learning rate')\narg.add_argument('--batch', type=int, default=32, help='batch size')\narg.add_argument('--epoch', type=int, default=30000, help='training epochs')\narg.add_argument('--continue', type=bool, default=True, help=\n 'should continue with last checkpoint')\narg.add_argument('--save_checkpoint', type=bool, default=True, help=\n 'whether save checkpoint')\narg.add_argument('--save_per_epoch', type=int, default=250, help=\n 'save checkpoint every specific epochs')\narg.add_argument('--save_dir', type=str, default='saved/params-dcgan', help\n ='check point save path')\narg.add_argument('--cuda', type=bool, default=False, help=\n 'whether use gpu, default is True')\narg.add_argument('--pred_per_gen', type=int, default=15, help=\n 'make a pred every specific epoch')\narg.add_argument('--validation', type=bool, default=False, help=\n 'whether use validation set, default: False')\narg.add_argument('--dataset', type=str, default='rem_face', help=\n 'rem, miku, face,rem_face')\nopt = arg.parse_args()\nepoch = opt.epoch\nepoch_start = 0\nbatch_size = opt.batch\nlr = opt.lr\nshould_save_checkpoint = opt.save_checkpoint\nsave_per_epoch = opt.save_per_epoch\nsave_dir = opt.save_dir\npred_per_epoch = opt.pred_per_epoch\nshould_use_val = opt.validation\ndataset = opt.dataset\ndataset_loader = getattr(gan_datasets, 'load_{}'.format(dataset))\nCTX = mx.gpu() if opt.cuda else mx.cpu()\nlogger.info('Will use {}'.format(CTX))\nlogger.info('Prepare data')\ntfs_train = gluon.data.vision.transforms.Compose([gluon.data.vision.\n transforms.Resize(size=(256, 256), interpolation=2), gluon.data.vision.\n transforms.RandomFlipLeftRight(), gluon.data.vision.transforms.\n RandomSaturation(0.005), gluon.data.vision.transforms.ToTensor(), 
gluon\n .data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, \n 0.5))])\ntfs_val = gluon.data.vision.transforms.Compose([gluon.data.vision.\n transforms.Resize(size=(256, 256), interpolation=2), gluon.data.vision.\n transforms.ToTensor(), gluon.data.vision.transforms.Normalize(mean=(0.5,\n 0.5, 0.5), std=(0.5, 0.5, 0.5))])\ntrain_set, val_set = dataset_loader()\ntrain_loader = gluon.data.DataLoader(train_set.transform_first(tfs_train),\n batch_size=batch_size, shuffle=True, last_batch='rollover', num_workers\n =get_cpus(), pin_memory=True)\nif val_set:\n val_loader = gluon.data.DataLoader(val_set.transform_first(tfs_val),\n batch_size=batch_size, shuffle=False, last_batch='rollover',\n num_workers=get_cpus(), pin_memory=True)\ngenerator = models.make_gen('v4')\ndiscriminator = models.make_dis()\ngenerator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\ndiscriminator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\nif getattr(opt, 'continue'):\n import utils\n makedirs(save_dir)\n epoch_start = utils.load_model_from_params(generator, discriminator,\n save_dir)\n logger.info('Continue training at {}, and rest epochs {}'.format(\n epoch_start, epoch - epoch_start))\ngenerator.hybridize()\ndiscriminator.hybridize()\nlogger.info('Prepare training')\nif should_use_val:\n history_labels = ['gloss', 'gval_loss', 'dloss', 'dval_loss']\nelse:\n history_labels = ['gloss', 'dloss']\nhistory = TrainingHistory(labels=history_labels)\nloss = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)\ntrainer_gen = gluon.Trainer(generator.collect_params(), optimizer='adam',\n optimizer_params={'learning_rate': lr, 'beta1': 0.5})\ntrainer_dis = gluon.Trainer(discriminator.collect_params(), optimizer=\n 'adam', optimizer_params={'learning_rate': lr, 'beta1': 0.5})\ntrue_label = mx.nd.ones((batch_size,), ctx=CTX)\nfake_label = mx.nd.zeros((batch_size,), ctx=CTX)\n\n\ndef make_noises(bs):\n return mx.nd.random_normal(0, 1, shape=(bs, 512), ctx=CTX, dtype='float32'\n 
).reshape((bs, 512, 1, 1))\n\n\npred_noise = make_noises(1)\nmx.nd.save('pred_noise', pred_noise)\n\n\ndef validation(g, d, val_loader):\n g_val_loss = 0.0\n d_val_loss = 0.0\n iter_times = 0\n for data, _ in tqdm.tqdm(val_loader, desc='Validating', leave=False,\n unit='batch', unit_scale=True, mininterval=1, maxinterval=5,\n dynamic_ncols=True):\n iter_times += 1\n bs = len(data)\n nosise = make_noises(bs)\n data = data.as_in_context(CTX)\n with autograd.predict_mode():\n out = d(data)\n err2real = loss(out, true_label)\n fake_img = g(nosise)\n out = d(fake_img)\n err2fake = loss(out, fake_label)\n err4dis = err2real + err2fake\n d_val_loss += err4dis.mean().asscalar()\n fake_img = g(nosise)\n out = d(fake_img)\n err4gen = loss(out, true_label)\n g_val_loss += err4gen.mean().asscalar()\n return g_val_loss / iter_times, d_val_loss / iter_times\n\n\nd_iter_times = 0\ng_iter_times = 0\nd_update_times = 0\ng_update_times = 0\ng_train_loss = 0.0\nd_train_loss = 0.0\nlogger.info('Begin training')\nfor ep in tqdm.tqdm(range(epoch_start, epoch + 1), total=epoch, desc=\n 'Total Progress', leave=False, initial=epoch_start, unit='epoch',\n unit_scale=True, mininterval=10, maxinterval=100, dynamic_ncols=True):\n for data, _ in tqdm.tqdm(train_loader, desc='Epoch {}'.format(ep),\n leave=False, unit='batch', unit_scale=True, mininterval=1,\n maxinterval=5, dynamic_ncols=True):\n bs = len(data)\n nosise = make_noises(bs)\n data = data.as_in_context(CTX)\n with autograd.record():\n d_iter_times += 1\n d_update_times += 1\n out = discriminator(data)\n err2real = loss(out, true_label)\n with autograd.predict_mode():\n fake_img = generator(nosise)\n out = discriminator(fake_img.detach())\n err2fake = loss(out, fake_label)\n err4dis = err2real + err2fake\n err4dis.backward()\n trainer_dis.step(bs)\n d_train_loss += err4dis.mean().asscalar()\n if d_iter_times % 5 == 0:\n g_iter_times += 1\n g_update_times += 1\n with autograd.record():\n fake_img = generator(nosise)\n with 
autograd.predict_mode():\n out = discriminator(fake_img)\n err4gen = loss(out, true_label)\n err4gen.backward()\n trainer_gen.step(bs)\n g_train_loss += err4gen.mean().asscalar()\n g_train_loss /= d_iter_times\n d_train_loss /= g_iter_times\n if should_use_val:\n g_val_loss, d_val_loss = validation(generator,\n discriminator, val_loader)\n history.update([g_train_loss, g_val_loss, d_train_loss,\n d_val_loss])\n logger.info('Generator[train: {}, val: {}]'.format(\n g_train_loss, g_val_loss))\n logger.info('Discriminator[train: {}, val: {}]'.format(\n d_train_loss, d_val_loss))\n else:\n history.update([g_train_loss, d_train_loss])\n logger.info('Generator[{}], Discriminator[{}]'.format(\n g_train_loss, d_train_loss))\n g_train_loss = 0.0\n d_train_loss = 0.0\n d_iter_times = 0\n g_iter_times = 0\n if g_update_times % pred_per_epoch == 0:\n fake = generator(make_noises(1))[0]\n unique_fake = generator(pred_noise)[0]\n pred_path = 'logs/pred-dcgan'\n pred_unique_path = os.path.join(pred_path, 'unique')\n makedirs(pred_path)\n makedirs(pred_unique_path)\n vis.show_img(fake.transpose((1, 2, 0)), save_path=pred_path)\n vis.show_img(unique_fake.transpose((1, 2, 0)), save_path=\n pred_unique_path)\n history.plot(save_path='logs/histories-dcgan')\n if should_save_checkpoint:\n if ep % save_per_epoch == 0:\n generator.save_parameters(os.path.join(save_dir,\n 'generator_{:04d}.params'.format(ep)))\n discriminator.save_parameters(os.path.join(save_dir,\n 'discriminator_{:04d}.params'.format(ep)))\nhistory.plot(save_path='logs/histories-dcgan')\ngenerator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.\n format(ep)))\n",
"step-4": "import os\nimport argparse\nimport logging as logger\nimport mxnet as mx\nimport tqdm\nfrom mxnet import autograd\nfrom mxnet import gluon\nfrom gluoncv.utils import makedirs\nimport datasets as gan_datasets\nfrom utils import vis, get_cpus, TrainingHistory\nimport models\nmx.random.seed(5)\nlogger.basicConfig(level=logger.INFO, filename='logs/train_loss-dcgan.log')\narg = argparse.ArgumentParser(description='training parameters')\narg.add_argument('--lr', type=float, default=0.001, help='learning rate')\narg.add_argument('--batch', type=int, default=32, help='batch size')\narg.add_argument('--epoch', type=int, default=30000, help='training epochs')\narg.add_argument('--continue', type=bool, default=True, help=\n 'should continue with last checkpoint')\narg.add_argument('--save_checkpoint', type=bool, default=True, help=\n 'whether save checkpoint')\narg.add_argument('--save_per_epoch', type=int, default=250, help=\n 'save checkpoint every specific epochs')\narg.add_argument('--save_dir', type=str, default='saved/params-dcgan', help\n ='check point save path')\narg.add_argument('--cuda', type=bool, default=False, help=\n 'whether use gpu, default is True')\narg.add_argument('--pred_per_gen', type=int, default=15, help=\n 'make a pred every specific epoch')\narg.add_argument('--validation', type=bool, default=False, help=\n 'whether use validation set, default: False')\narg.add_argument('--dataset', type=str, default='rem_face', help=\n 'rem, miku, face,rem_face')\nopt = arg.parse_args()\nepoch = opt.epoch\nepoch_start = 0\nbatch_size = opt.batch\nlr = opt.lr\nshould_save_checkpoint = opt.save_checkpoint\nsave_per_epoch = opt.save_per_epoch\nsave_dir = opt.save_dir\npred_per_epoch = opt.pred_per_epoch\nshould_use_val = opt.validation\ndataset = opt.dataset\ndataset_loader = getattr(gan_datasets, 'load_{}'.format(dataset))\nCTX = mx.gpu() if opt.cuda else mx.cpu()\nlogger.info('Will use {}'.format(CTX))\nlogger.info('Prepare data')\ntfs_train = 
gluon.data.vision.transforms.Compose([gluon.data.vision.\n transforms.Resize(size=(256, 256), interpolation=2), gluon.data.vision.\n transforms.RandomFlipLeftRight(), gluon.data.vision.transforms.\n RandomSaturation(0.005), gluon.data.vision.transforms.ToTensor(), gluon\n .data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, \n 0.5))])\ntfs_val = gluon.data.vision.transforms.Compose([gluon.data.vision.\n transforms.Resize(size=(256, 256), interpolation=2), gluon.data.vision.\n transforms.ToTensor(), gluon.data.vision.transforms.Normalize(mean=(0.5,\n 0.5, 0.5), std=(0.5, 0.5, 0.5))])\ntrain_set, val_set = dataset_loader()\ntrain_loader = gluon.data.DataLoader(train_set.transform_first(tfs_train),\n batch_size=batch_size, shuffle=True, last_batch='rollover', num_workers\n =get_cpus(), pin_memory=True)\nif val_set:\n val_loader = gluon.data.DataLoader(val_set.transform_first(tfs_val),\n batch_size=batch_size, shuffle=False, last_batch='rollover',\n num_workers=get_cpus(), pin_memory=True)\ngenerator = models.make_gen('v4')\ndiscriminator = models.make_dis()\ngenerator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\ndiscriminator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\nif getattr(opt, 'continue'):\n import utils\n makedirs(save_dir)\n epoch_start = utils.load_model_from_params(generator, discriminator,\n save_dir)\n logger.info('Continue training at {}, and rest epochs {}'.format(\n epoch_start, epoch - epoch_start))\ngenerator.hybridize()\ndiscriminator.hybridize()\nlogger.info('Prepare training')\nif should_use_val:\n history_labels = ['gloss', 'gval_loss', 'dloss', 'dval_loss']\nelse:\n history_labels = ['gloss', 'dloss']\nhistory = TrainingHistory(labels=history_labels)\nloss = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)\ntrainer_gen = gluon.Trainer(generator.collect_params(), optimizer='adam',\n optimizer_params={'learning_rate': lr, 'beta1': 0.5})\ntrainer_dis = gluon.Trainer(discriminator.collect_params(), optimizer=\n 
'adam', optimizer_params={'learning_rate': lr, 'beta1': 0.5})\ntrue_label = mx.nd.ones((batch_size,), ctx=CTX)\nfake_label = mx.nd.zeros((batch_size,), ctx=CTX)\n\n\ndef make_noises(bs):\n return mx.nd.random_normal(0, 1, shape=(bs, 512), ctx=CTX, dtype='float32'\n ).reshape((bs, 512, 1, 1))\n\n\npred_noise = make_noises(1)\nmx.nd.save('pred_noise', pred_noise)\n\n\ndef validation(g, d, val_loader):\n g_val_loss = 0.0\n d_val_loss = 0.0\n iter_times = 0\n for data, _ in tqdm.tqdm(val_loader, desc='Validating', leave=False,\n unit='batch', unit_scale=True, mininterval=1, maxinterval=5,\n dynamic_ncols=True):\n iter_times += 1\n bs = len(data)\n nosise = make_noises(bs)\n data = data.as_in_context(CTX)\n with autograd.predict_mode():\n out = d(data)\n err2real = loss(out, true_label)\n fake_img = g(nosise)\n out = d(fake_img)\n err2fake = loss(out, fake_label)\n err4dis = err2real + err2fake\n d_val_loss += err4dis.mean().asscalar()\n fake_img = g(nosise)\n out = d(fake_img)\n err4gen = loss(out, true_label)\n g_val_loss += err4gen.mean().asscalar()\n return g_val_loss / iter_times, d_val_loss / iter_times\n\n\nd_iter_times = 0\ng_iter_times = 0\nd_update_times = 0\ng_update_times = 0\ng_train_loss = 0.0\nd_train_loss = 0.0\nlogger.info('Begin training')\nfor ep in tqdm.tqdm(range(epoch_start, epoch + 1), total=epoch, desc=\n 'Total Progress', leave=False, initial=epoch_start, unit='epoch',\n unit_scale=True, mininterval=10, maxinterval=100, dynamic_ncols=True):\n for data, _ in tqdm.tqdm(train_loader, desc='Epoch {}'.format(ep),\n leave=False, unit='batch', unit_scale=True, mininterval=1,\n maxinterval=5, dynamic_ncols=True):\n bs = len(data)\n nosise = make_noises(bs)\n data = data.as_in_context(CTX)\n with autograd.record():\n d_iter_times += 1\n d_update_times += 1\n out = discriminator(data)\n err2real = loss(out, true_label)\n with autograd.predict_mode():\n fake_img = generator(nosise)\n out = discriminator(fake_img.detach())\n err2fake = loss(out, 
fake_label)\n err4dis = err2real + err2fake\n err4dis.backward()\n trainer_dis.step(bs)\n d_train_loss += err4dis.mean().asscalar()\n if d_iter_times % 5 == 0:\n g_iter_times += 1\n g_update_times += 1\n with autograd.record():\n fake_img = generator(nosise)\n with autograd.predict_mode():\n out = discriminator(fake_img)\n err4gen = loss(out, true_label)\n err4gen.backward()\n trainer_gen.step(bs)\n g_train_loss += err4gen.mean().asscalar()\n g_train_loss /= d_iter_times\n d_train_loss /= g_iter_times\n if should_use_val:\n g_val_loss, d_val_loss = validation(generator,\n discriminator, val_loader)\n history.update([g_train_loss, g_val_loss, d_train_loss,\n d_val_loss])\n logger.info('Generator[train: {}, val: {}]'.format(\n g_train_loss, g_val_loss))\n logger.info('Discriminator[train: {}, val: {}]'.format(\n d_train_loss, d_val_loss))\n else:\n history.update([g_train_loss, d_train_loss])\n logger.info('Generator[{}], Discriminator[{}]'.format(\n g_train_loss, d_train_loss))\n g_train_loss = 0.0\n d_train_loss = 0.0\n d_iter_times = 0\n g_iter_times = 0\n if g_update_times % pred_per_epoch == 0:\n fake = generator(make_noises(1))[0]\n unique_fake = generator(pred_noise)[0]\n pred_path = 'logs/pred-dcgan'\n pred_unique_path = os.path.join(pred_path, 'unique')\n makedirs(pred_path)\n makedirs(pred_unique_path)\n vis.show_img(fake.transpose((1, 2, 0)), save_path=pred_path)\n vis.show_img(unique_fake.transpose((1, 2, 0)), save_path=\n pred_unique_path)\n history.plot(save_path='logs/histories-dcgan')\n if should_save_checkpoint:\n if ep % save_per_epoch == 0:\n generator.save_parameters(os.path.join(save_dir,\n 'generator_{:04d}.params'.format(ep)))\n discriminator.save_parameters(os.path.join(save_dir,\n 'discriminator_{:04d}.params'.format(ep)))\nhistory.plot(save_path='logs/histories-dcgan')\ngenerator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.\n format(ep)))\n",
"step-5": "# %% import libs\nimport os\nimport argparse\nimport logging as logger\nimport mxnet as mx\nimport tqdm\nfrom mxnet import autograd\nfrom mxnet import gluon\nfrom gluoncv.utils import makedirs\n\nimport datasets as gan_datasets\nfrom utils import vis, get_cpus, TrainingHistory\nimport models\n\nmx.random.seed(5)\nlogger.basicConfig(level=logger.INFO, filename='logs/train_loss-dcgan.log')\n\narg = argparse.ArgumentParser(description=\"training parameters\")\narg.add_argument('--lr', type=float, default=0.001, help='learning rate')\narg.add_argument('--batch', type=int, default=32, help='batch size')\narg.add_argument('--epoch', type=int, default=30000, help='training epochs')\narg.add_argument('--continue', type=bool, default=True, help='should continue with last checkpoint')\narg.add_argument('--save_checkpoint', type=bool, default=True, help='whether save checkpoint')\narg.add_argument('--save_per_epoch', type=int, default=250, help='save checkpoint every specific epochs')\narg.add_argument('--save_dir', type=str, default='saved/params-dcgan', help='check point save path')\narg.add_argument('--cuda', type=bool, default=False, help='whether use gpu, default is True')\narg.add_argument('--pred_per_gen', type=int, default=15, help='make a pred every specific epoch')\narg.add_argument('--validation', type=bool, default=False, help='whether use validation set, default: False')\narg.add_argument('--dataset', type=str, default='rem_face', help='rem, miku, face,rem_face')\n\nopt = arg.parse_args()\n\n# %% define parameters\nepoch = opt.epoch\nepoch_start = 0\nbatch_size = opt.batch\nlr = opt.lr\nshould_save_checkpoint = opt.save_checkpoint\nsave_per_epoch = opt.save_per_epoch\nsave_dir = opt.save_dir\npred_per_epoch = opt.pred_per_epoch\nshould_use_val = opt.validation\ndataset = opt.dataset\ndataset_loader = getattr(gan_datasets, 'load_{}'.format(dataset))\n\nCTX = mx.gpu() if opt.cuda else mx.cpu()\nlogger.info('Will use {}'.format(CTX))\n\n# %% define 
dataloader\nlogger.info(\"Prepare data\")\n# noinspection PyTypeChecker\ntfs_train = gluon.data.vision.transforms.Compose([\n gluon.data.vision.transforms.Resize(size=(256, 256), interpolation=2),\n gluon.data.vision.transforms.RandomFlipLeftRight(),\n gluon.data.vision.transforms.RandomSaturation(0.005),\n gluon.data.vision.transforms.ToTensor(),\n gluon.data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n])\n\n# noinspection PyTypeChecker\ntfs_val = gluon.data.vision.transforms.Compose([\n gluon.data.vision.transforms.Resize(size=(256, 256), interpolation=2),\n gluon.data.vision.transforms.ToTensor(),\n gluon.data.vision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n])\n\ntrain_set, val_set = dataset_loader()\ntrain_loader = gluon.data.DataLoader(train_set.transform_first(tfs_train),\n batch_size=batch_size, shuffle=True,\n last_batch='rollover', num_workers=get_cpus(), pin_memory=True)\nif val_set:\n val_loader = gluon.data.DataLoader(val_set.transform_first(tfs_val),\n batch_size=batch_size, shuffle=False,\n last_batch='rollover', num_workers=get_cpus(), pin_memory=True)\n\n# %% define models\ngenerator = models.make_gen('v4')\ndiscriminator = models.make_dis()\ngenerator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\ndiscriminator.initialize(init=mx.init.Normal(0.02), ctx=CTX)\nif getattr(opt, 'continue'):\n import utils\n\n makedirs(save_dir)\n epoch_start = utils.load_model_from_params(generator, discriminator, save_dir)\n logger.info('Continue training at {}, and rest epochs {}'.format(epoch_start, epoch - epoch_start))\n\ngenerator.hybridize()\ndiscriminator.hybridize()\n\n# %% prepare training\nlogger.info(\"Prepare training\")\nif should_use_val:\n history_labels = ['gloss', 'gval_loss', 'dloss', 'dval_loss']\nelse:\n history_labels = ['gloss', 'dloss']\nhistory = TrainingHistory(labels=history_labels)\nloss = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)\ntrainer_gen = 
gluon.Trainer(generator.collect_params(), optimizer='adam', optimizer_params={\n 'learning_rate': lr,\n 'beta1': 0.5\n})\ntrainer_dis = gluon.Trainer(discriminator.collect_params(), optimizer='adam', optimizer_params={\n 'learning_rate': lr,\n 'beta1': 0.5\n})\ntrue_label = mx.nd.ones((batch_size,), ctx=CTX)\nfake_label = mx.nd.zeros((batch_size,), ctx=CTX)\n\n\ndef make_noises(bs):\n return mx.nd.random_normal(0, 1, shape=(bs, 512), ctx=CTX, dtype='float32').reshape((bs, 512, 1, 1))\n\n\npred_noise = make_noises(1)\nmx.nd.save('pred_noise', pred_noise)\n\n\ndef validation(g, d, val_loader):\n g_val_loss = 0.0\n d_val_loss = 0.0\n iter_times = 0\n for data, _ in tqdm.tqdm(\n val_loader,\n desc=\"Validating\",\n leave=False,\n unit='batch',\n unit_scale=True,\n mininterval=1,\n maxinterval=5,\n dynamic_ncols=True):\n iter_times += 1\n bs = len(data)\n nosise = make_noises(bs)\n data = data.as_in_context(CTX)\n with autograd.predict_mode():\n # loss for d\n out = d(data)\n err2real = loss(out, true_label)\n\n fake_img = g(nosise)\n out = d(fake_img)\n err2fake = loss(out, fake_label)\n\n err4dis = err2real + err2fake\n d_val_loss += err4dis.mean().asscalar()\n\n # loss for g\n fake_img = g(nosise)\n out = d(fake_img)\n err4gen = loss(out, true_label)\n g_val_loss += err4gen.mean().asscalar()\n return g_val_loss / iter_times, d_val_loss / iter_times\n\n\n# %% begin training\nd_iter_times = 0\ng_iter_times = 0\nd_update_times = 0\ng_update_times = 0\ng_train_loss = 0.0\nd_train_loss = 0.0\nlogger.info(\"Begin training\")\nfor ep in tqdm.tqdm(range(epoch_start, epoch + 1),\n total=epoch,\n desc=\"Total Progress\",\n leave=False,\n initial=epoch_start,\n unit='epoch',\n unit_scale=True,\n mininterval=10,\n maxinterval=100,\n dynamic_ncols=True):\n\n for data, _ in tqdm.tqdm(\n train_loader,\n desc=\"Epoch {}\".format(ep),\n leave=False,\n unit='batch',\n unit_scale=True,\n mininterval=1,\n maxinterval=5,\n dynamic_ncols=True):\n bs = len(data)\n nosise = 
make_noises(bs)\n data = data.as_in_context(CTX)\n # begin training discriminator\n with autograd.record():\n d_iter_times += 1\n d_update_times += 1\n # train with real image\n out = discriminator(data)\n err2real = loss(out, true_label)\n\n # train with fake image\n # detach the input, or its gradients will be computed\n with autograd.predict_mode():\n fake_img = generator(nosise)\n out = discriminator(fake_img.detach())\n err2fake = loss(out, fake_label)\n\n err4dis = err2real + err2fake\n err4dis.backward()\n trainer_dis.step(bs)\n d_train_loss += err4dis.mean().asscalar()\n\n if d_iter_times % 5 == 0:\n g_iter_times += 1\n g_update_times += 1\n # begin training generator\n with autograd.record():\n fake_img = generator(nosise)\n with autograd.predict_mode():\n out = discriminator(fake_img)\n err4gen = loss(out, true_label)\n err4gen.backward()\n trainer_gen.step(bs)\n g_train_loss += err4gen.mean().asscalar()\n\n g_train_loss /= d_iter_times\n d_train_loss /= g_iter_times\n\n # use validation set or not\n if should_use_val:\n g_val_loss, d_val_loss = validation(generator, discriminator, val_loader)\n history.update([g_train_loss, g_val_loss, d_train_loss, d_val_loss])\n logger.info(\"Generator[train: {}, val: {}]\".format(g_train_loss, g_val_loss))\n logger.info(\"Discriminator[train: {}, val: {}]\".format(d_train_loss, d_val_loss))\n else:\n history.update([g_train_loss, d_train_loss])\n logger.info(\"Generator[{}], Discriminator[{}]\".format(g_train_loss, d_train_loss))\n\n g_train_loss = 0.0\n d_train_loss = 0.0\n d_iter_times = 0\n g_iter_times = 0\n\n # make a prediction\n if g_update_times % pred_per_epoch == 0:\n fake = generator(make_noises(1))[0]\n unique_fake = generator(pred_noise)[0]\n pred_path = 'logs/pred-dcgan'\n pred_unique_path = os.path.join(pred_path, 'unique')\n makedirs(pred_path)\n makedirs(pred_unique_path)\n vis.show_img(fake.transpose((1, 2, 0)), save_path=pred_path)\n vis.show_img(unique_fake.transpose((1, 2, 0)), 
save_path=pred_unique_path)\n\n # save history plot every epoch\n history.plot(save_path='logs/histories-dcgan')\n\n # save checkpoint\n if should_save_checkpoint:\n if ep % save_per_epoch == 0:\n generator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.format(ep)))\n discriminator.save_parameters(os.path.join(save_dir, 'discriminator_{:04d}.params'.format(ep)))\n\nhistory.plot(save_path='logs/histories-dcgan')\ngenerator.save_parameters(os.path.join(save_dir, 'generator_{:04d}.params'.format(ep)))\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def calcLuckyNumber(x):
    """Return the set of "lucky" numbers n with 1 < n <= x.

    A lucky number is any product 3**i * 5**j * 7**k (i, j, k >= 0).
    The exponent-free loops below multiply up from 1 and stop as soon as
    a factor exceeds x, so the function is exact for arbitrarily large x
    (the previous fixed range(30) bound is no longer needed) and does no
    wasted work for small x.

    Parameters:
        x: upper bound (inclusive); non-positive x yields an empty set.

    Returns:
        A set of all qualifying integers in (1, x].
    """
    resultSet = set()
    p3 = 1  # 3**i
    while p3 <= x:
        p35 = p3  # 3**i * 5**j
        while p35 <= x:
            n = p35  # 3**i * 5**j * 7**k
            while n <= x:
                # 1 itself (i == j == k == 0) is excluded by the n > 1 rule.
                if n > 1:
                    resultSet.add(n)
                n *= 7
            p35 *= 5
        p3 *= 3
    return resultSet
# Read an upper bound from the user; empty input is silently ignored,
# non-positive values produce no output.
x = input("input number: ")
if x:
    x = int(x)
    if x > 0:
        # Print only the count of lucky numbers, not the numbers themselves.
        print(len(calcLuckyNumber(x)))
|
normal
|
{
"blob_id": "49a9fb43f3651d28d3ffac5e33d10c428afd08fd",
"index": 6072,
"step-1": "<mask token>\n",
"step-2": "def calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n return resultSet\n\n\n<mask token>\n",
"step-3": "def calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n return resultSet\n\n\n<mask token>\nif x != '':\n x = int(x)\n if x > 0:\n result = calcLuckyNumber(x)\n print(len(result))\n",
"step-4": "def calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n return resultSet\n\n\nx = input('input number: ')\nif x != '':\n x = int(x)\n if x > 0:\n result = calcLuckyNumber(x)\n print(len(result))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n\n return resultSet\n\nx = input(\"input number: \")\nif x != '':\n x = int(x)\n if x > 0:\n result = calcLuckyNumber(x)\n print(len(result))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" AuthService class module.
"""
from urllib.parse import urlencode
from http.client import HTTPConnection, HTTPResponse, HTTPException
from dms2021sensor.data.rest.exc import NotFoundError
class AuthService():
    """ REST client to connect to the authentication service.
    """

    def __init__(self, host: str, port: int):
        """ Constructor method.

        Stores the connection coordinates of the authentication service.
        ---
        Parameters:
            - host: The authentication service host string.
            - port: The authentication service port number.
        """
        self.__host: str = host
        self.__port: int = port

    def __get_connection(self) -> HTTPConnection:
        """ Creates a new connection to the authentication server.
        ---
        Returns:
            The connection object.
        """
        return HTTPConnection(self.__host, self.__port)

    def has_right(self, username: str, right: str) -> bool:
        """ Determines whether a given user from the authentication server
        has a certain right or not.
        ---
        Parameters:
            - username: The user name string.
            - right: The right name.
        Returns:
            True if the user has the given right
        Throws:
            - NotFoundError: if the user does not have the right, the user does not
              exist, or the right does not exist.
            - HTTPException: On an unhandled 500 error.
        """
        # The credentials are also carried in the request body, even though
        # the resource path already encodes them.
        body: str = urlencode({'username': username, 'right': right})
        request_headers: dict = {
            'Content-type': 'application/x-www-form-urlencoded'
        }
        connection: HTTPConnection = self.__get_connection()
        connection.request(
            'GET',
            '/users/{}/rights/{}'.format(username, right),
            body,
            request_headers
        )
        response: HTTPResponse = connection.getresponse()
        status = response.status
        # Error statuses are mapped to exceptions; anything other than a
        # plain 200 that is not an error yields False.
        if status == 404:
            raise NotFoundError()
        if status == 500:
            raise HTTPException('Server error')
        return status == 200
|
normal
|
{
"blob_id": "1438a268780217e647999ba031aa4a50a6912d2f",
"index": 3069,
"step-1": "<mask token>\n\n\nclass AuthService:\n <mask token>\n <mask token>\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthService:\n <mask token>\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AuthService:\n \"\"\" REST client to connect to the authentication service.\n \"\"\"\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n\n def has_right(self, username: str, right: str) ->bool:\n \"\"\" Determines whether a given user from the authentication server\n has a certain right or not.\n ---\n Parameters:\n - username: The user name string.\n - right: The right name.\n Returns:\n True if the user has the given right\n Throws:\n - NotFoundError: if the user does not have the right, the user does not\n exist, or the right does not exist.\n - HTTPException: On an unhandled 500 error.\n \"\"\"\n form: str = urlencode({'username': username, 'right': right})\n headers: dict = {'Content-type': 'application/x-www-form-urlencoded'}\n connection: HTTPConnection = self.__get_connection()\n connection.request('GET', '/users/' + str(username) + '/rights/' +\n str(right), form, headers)\n response: HTTPResponse = connection.getresponse()\n if response.status == 200:\n return True\n if response.status == 404:\n raise NotFoundError()\n if response.status == 500:\n raise HTTPException('Server error')\n return False\n",
"step-4": "<mask token>\nfrom urllib.parse import urlencode\nfrom http.client import HTTPConnection, HTTPResponse, HTTPException\nfrom dms2021sensor.data.rest.exc import NotFoundError\n\n\nclass AuthService:\n \"\"\" REST client to connect to the authentication service.\n \"\"\"\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) ->HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n\n def has_right(self, username: str, right: str) ->bool:\n \"\"\" Determines whether a given user from the authentication server\n has a certain right or not.\n ---\n Parameters:\n - username: The user name string.\n - right: The right name.\n Returns:\n True if the user has the given right\n Throws:\n - NotFoundError: if the user does not have the right, the user does not\n exist, or the right does not exist.\n - HTTPException: On an unhandled 500 error.\n \"\"\"\n form: str = urlencode({'username': username, 'right': right})\n headers: dict = {'Content-type': 'application/x-www-form-urlencoded'}\n connection: HTTPConnection = self.__get_connection()\n connection.request('GET', '/users/' + str(username) + '/rights/' +\n str(right), form, headers)\n response: HTTPResponse = connection.getresponse()\n if response.status == 200:\n return True\n if response.status == 404:\n raise NotFoundError()\n if response.status == 500:\n raise HTTPException('Server error')\n return False\n",
"step-5": "\"\"\" AuthService class module.\n\"\"\"\n\nfrom urllib.parse import urlencode\nfrom http.client import HTTPConnection, HTTPResponse, HTTPException\nfrom dms2021sensor.data.rest.exc import NotFoundError\n\n\nclass AuthService():\n \"\"\" REST client to connect to the authentication service.\n \"\"\"\n\n def __init__(self, host: str, port: int):\n \"\"\" Constructor method.\n\n Initializes the client.\n ---\n Parameters:\n - host: The authentication service host string.\n - port: The authentication service port number.\n \"\"\"\n self.__host: str = host\n self.__port: int = port\n\n def __get_connection(self) -> HTTPConnection:\n \"\"\" Creates a new connection to the authentication server.\n ---\n Returns:\n The connection object.\n \"\"\"\n return HTTPConnection(self.__host, self.__port)\n\n def has_right(self, username: str, right: str) -> bool:\n \"\"\" Determines whether a given user from the authentication server\n has a certain right or not.\n ---\n Parameters:\n - username: The user name string.\n - right: The right name.\n Returns:\n True if the user has the given right\n Throws:\n - NotFoundError: if the user does not have the right, the user does not\n exist, or the right does not exist.\n - HTTPException: On an unhandled 500 error.\n \"\"\"\n form: str = urlencode({'username': username, 'right': right})\n headers: dict = {\n 'Content-type': 'application/x-www-form-urlencoded'\n }\n connection: HTTPConnection = self.__get_connection()\n connection.request('GET', '/users/'+str(username)+'/rights/'+str(right), form, headers)\n response: HTTPResponse = connection.getresponse()\n if response.status == 200:\n return True\n if response.status == 404:\n raise NotFoundError()\n if response.status == 500:\n raise HTTPException('Server error')\n return False\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.