| code (string, length 13–6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, length 1–5) |
|---|---|---|---|
# Cookies Keys
class Cookies:
    USER_TOKEN = "utoken"


# Session Keys
class Session:
    USER_ROOT_ID = "x-root-id"

    class APIStatisticsCollection:
        API_ACTION = "x-stats-api-action"
        DICT_PARAMS = "x-stats-param-dict"
        DICT_RESPONSE = "x-stats-resp-dict"
        SUCCESS = "x-stats-success"

        COLLECT = "x-stats-collect"


# Param Dict Prefix
class ParamDictPrefix:
    PostKey = "x-"  # Used in http POST params from HTML forms
|
normal
|
{
"blob_id": "d0e5a3a6db0e27ecf157294850a48a19750a5ac2",
"index": 1667,
"step-1": "<mask token>\n\n\nclass Session:\n <mask token>\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-2": "<mask token>\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-3": "class Cookies:\n <mask token>\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-4": "class Cookies:\n USER_TOKEN = 'utoken'\n\n\nclass Session:\n USER_ROOT_ID = 'x-root-id'\n\n\n class APIStatisticsCollection:\n API_ACTION = 'x-stats-api-action'\n DICT_PARAMS = 'x-stats-param-dict'\n DICT_RESPONSE = 'x-stats-resp-dict'\n SUCCESS = 'x-stats-success'\n COLLECT = 'x-stats-collect'\n\n\nclass ParamDictPrefix:\n PostKey = 'x-'\n",
"step-5": "# Cookies Keys\nclass Cookies:\n USER_TOKEN = \"utoken\"\n\n\n# Session Keys\nclass Session:\n USER_ROOT_ID = \"x-root-id\"\n\n class APIStatisticsCollection:\n API_ACTION = \"x-stats-api-action\"\n DICT_PARAMS = \"x-stats-param-dict\"\n DICT_RESPONSE = \"x-stats-resp-dict\"\n SUCCESS = \"x-stats-success\"\n\n COLLECT = \"x-stats-collect\"\n\n\n# Param Dict Prefix\nclass ParamDictPrefix:\n PostKey = \"x-\" # Used in http POST params from HTML forms\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
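For orientation: in the row above, the `code` column holds a small Python constants module and each `step-*` string in `original_example` is a progressively masked version of the same file. A minimal sketch (hypothetical, not part of the dataset) of how such key-constant classes are typically consumed, assuming the `Session` class from the row is in scope:

```python
# Hypothetical usage of the key-constant classes from the row above;
# the plain dict stands in for a real web framework's session object.
session = {}

# Write and read a session key through the constant instead of a raw string,
# so a typo fails loudly as an AttributeError instead of a silent key miss.
session[Session.USER_ROOT_ID] = "root-1234"
print(session[Session.USER_ROOT_ID])  # -> root-1234

# Statistics keys are grouped under the nested class.
session[Session.APIStatisticsCollection.SUCCESS] = True
```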
<|reserved_special_token_0|>
class FitTemplate:

    def __init__(self, fit_function, log_dir=None):
        self.fit_function = fit_function
        self.parameters = Parameters()
        self.fit_result = None
        if log_dir is not None:
            logging.basicConfig(filename=log_dir + 'log.log', level=logging
                .INFO)
        else:
            logging.basicConfig(level=logging.CRITICAL)

    def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
        model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
        return ((model_values - data) * weights) ** 2
    <|reserved_special_token_0|>

    def get_opt_parameters(self):
        if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for')
        return self.fit_result.params.valuesdict()
    <|reserved_special_token_0|>

    def print_fit_result(self):
        logging.info(fit_report(self.fit_result))
        print(fit_report(self.fit_result))

    def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
        errorbars=None, label=None, ax=None, c=None, colour_index=None, **
        kwargs):
        if ax is None:
            _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
                )
        plt.rcParams.update({'font.size': 16})
        colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
        if c is not None:
            color = c
        elif colour_index is not None:
            color = colours[colour_index]
        else:
            color = colours[0]
        ax.scatter(x, y, color=color)
        if errorbars is not None:
            ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
        fitdomain = np.linspace(x[0], x[-1], 1000)
        ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
            params.valuesdict(), **kwargs), c=color, label=label)
        plt.legend()
        ax.set_title(title)
        ax.set_ylabel(ylabel)
        ax.set_xlabel(xlabel)
        plt.grid()
        return ax
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FitTemplate:

    def __init__(self, fit_function, log_dir=None):
        self.fit_function = fit_function
        self.parameters = Parameters()
        self.fit_result = None
        if log_dir is not None:
            logging.basicConfig(filename=log_dir + 'log.log', level=logging
                .INFO)
        else:
            logging.basicConfig(level=logging.CRITICAL)

    def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
        model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
        return ((model_values - data) * weights) ** 2
    <|reserved_special_token_0|>

    def get_opt_parameters(self):
        if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for')
        return self.fit_result.params.valuesdict()

    def print_parameters(self):
        self.parameters.pretty_print()

    def print_fit_result(self):
        logging.info(fit_report(self.fit_result))
        print(fit_report(self.fit_result))

    def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
        errorbars=None, label=None, ax=None, c=None, colour_index=None, **
        kwargs):
        if ax is None:
            _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
                )
        plt.rcParams.update({'font.size': 16})
        colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
        if c is not None:
            color = c
        elif colour_index is not None:
            color = colours[colour_index]
        else:
            color = colours[0]
        ax.scatter(x, y, color=color)
        if errorbars is not None:
            ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
        fitdomain = np.linspace(x[0], x[-1], 1000)
        ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
            params.valuesdict(), **kwargs), c=color, label=label)
        plt.legend()
        ax.set_title(title)
        ax.set_ylabel(ylabel)
        ax.set_xlabel(xlabel)
        plt.grid()
        return ax
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FitTemplate:

    def __init__(self, fit_function, log_dir=None):
        self.fit_function = fit_function
        self.parameters = Parameters()
        self.fit_result = None
        if log_dir is not None:
            logging.basicConfig(filename=log_dir + 'log.log', level=logging
                .INFO)
        else:
            logging.basicConfig(level=logging.CRITICAL)

    def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
        model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
        return ((model_values - data) * weights) ** 2

    def do_minimisation(self, x, data, weights=1, **kwargs):
        self.fit_result = minimize(self.residuals_wrapper, self.parameters,
            args=(x, data, weights), kws=kwargs)
        logging.info('Fit Result')
        logging.info('==========')
        return self.fit_result

    def get_opt_parameters(self):
        if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for')
        return self.fit_result.params.valuesdict()

    def print_parameters(self):
        self.parameters.pretty_print()

    def print_fit_result(self):
        logging.info(fit_report(self.fit_result))
        print(fit_report(self.fit_result))

    def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
        errorbars=None, label=None, ax=None, c=None, colour_index=None, **
        kwargs):
        if ax is None:
            _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
                )
        plt.rcParams.update({'font.size': 16})
        colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
        if c is not None:
            color = c
        elif colour_index is not None:
            color = colours[colour_index]
        else:
            color = colours[0]
        ax.scatter(x, y, color=color)
        if errorbars is not None:
            ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
        fitdomain = np.linspace(x[0], x[-1], 1000)
        ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
            params.valuesdict(), **kwargs), c=color, label=label)
        plt.legend()
        ax.set_title(title)
        ax.set_ylabel(ylabel)
        ax.set_xlabel(xlabel)
        plt.grid()
        return ax
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, fit_report
import logging


class FitTemplate:

    def __init__(self, fit_function, log_dir=None):
        self.fit_function = fit_function
        self.parameters = Parameters()
        self.fit_result = None
        if log_dir is not None:
            logging.basicConfig(filename=log_dir + 'log.log', level=logging
                .INFO)
        else:
            logging.basicConfig(level=logging.CRITICAL)

    def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
        model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
        return ((model_values - data) * weights) ** 2

    def do_minimisation(self, x, data, weights=1, **kwargs):
        self.fit_result = minimize(self.residuals_wrapper, self.parameters,
            args=(x, data, weights), kws=kwargs)
        logging.info('Fit Result')
        logging.info('==========')
        return self.fit_result

    def get_opt_parameters(self):
        if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for')
        return self.fit_result.params.valuesdict()

    def print_parameters(self):
        self.parameters.pretty_print()

    def print_fit_result(self):
        logging.info(fit_report(self.fit_result))
        print(fit_report(self.fit_result))

    def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
        errorbars=None, label=None, ax=None, c=None, colour_index=None, **
        kwargs):
        if ax is None:
            _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
                )
        plt.rcParams.update({'font.size': 16})
        colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
        if c is not None:
            color = c
        elif colour_index is not None:
            color = colours[colour_index]
        else:
            color = colours[0]
        ax.scatter(x, y, color=color)
        if errorbars is not None:
            ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
        fitdomain = np.linspace(x[0], x[-1], 1000)
        ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
            params.valuesdict(), **kwargs), c=color, label=label)
        plt.legend()
        ax.set_title(title)
        ax.set_ylabel(ylabel)
        ax.set_xlabel(xlabel)
        plt.grid()
        return ax
<|reserved_special_token_1|>
"""After seeing how great the lmfit package, I was inspired to create my own
object using it. This acts as a fitting template.
"""
##-------------------------------PREAMBLE-----------------------------------##
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, fit_report
import logging
##-------------------------------CLASS DEFINITION-----------------------------------##
class FitTemplate():
def __init__(self, fit_function, log_dir = None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
#setup logging. warning level is standard and is sent to stdout. info is requested by log_dir argument,
#and is printed to log file
if log_dir is not None:
logging.basicConfig(filename=log_dir +'log.log', level=logging.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data,weights,**kwargs):
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data)*weights)**2
def do_minimisation(self, x, data, weights = 1, **kwargs):
self.fit_result = minimize(self.residuals_wrapper, self.parameters, args = (x, data, weights), kws = kwargs)
logging.info('Fit Result')
logging.info('==========')
return self.fit_result
def get_opt_parameters(self):
if self.fit_result is None:
raise ValueError("No fit result! Do a fit before asking for")
return self.fit_result.params.valuesdict()
def print_parameters(self):
self.parameters.pretty_print()
def print_fit_result(self):
logging.info((fit_report(self.fit_result)))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel = None, ylabel = None, title = None, errorbars = None, label = None, ax = None, c = None, colour_index = None, **kwargs):
if ax is None:
_, ax = plt.subplots(1 ,1, constrained_layout=True, figsize=(18, 9))
plt.rcParams.update({'font.size': 16})
colours = ['b','m','c','r','tab:orange', 'tab:pink']
#decide colour
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
#scatter plot
ax.scatter(x, y, color = color)
#plot errors
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls = 'none', c = color, capsize = 3)
#plot model
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.params.valuesdict(), **kwargs), c = color, label = label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
|
flexible
|
{
"blob_id": "9e16921d83a5f62aad694b26a92b57b97ccda461",
"index": 1651,
"step-1": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n <mask token>\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n <mask token>\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-2": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n <mask token>\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-3": "<mask token>\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n\n def do_minimisation(self, x, data, weights=1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters,\n args=(x, data, weights), kws=kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom lmfit import minimize, Parameters, fit_report\nimport logging\n\n\nclass FitTemplate:\n\n def __init__(self, fit_function, log_dir=None):\n self.fit_function = fit_function\n self.parameters = Parameters()\n self.fit_result = None\n if log_dir is not None:\n logging.basicConfig(filename=log_dir + 'log.log', level=logging\n .INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n\n def residuals_wrapper(self, parameters, x, data, weights, **kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data) * weights) ** 2\n\n def do_minimisation(self, x, data, weights=1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters,\n args=(x, data, weights), kws=kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None:\n raise ValueError('No fit result! Do a fit before asking for')\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print()\n\n def print_fit_result(self):\n logging.info(fit_report(self.fit_result))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,\n errorbars=None, label=None, ax=None, c=None, colour_index=None, **\n kwargs):\n if ax is None:\n _, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)\n )\n plt.rcParams.update({'font.size': 16})\n colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']\n if c is not None:\n color = c\n elif colour_index is not None:\n color = colours[colour_index]\n else:\n color = colours[0]\n ax.scatter(x, y, color=color)\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)\n fitdomain = np.linspace(x[0], x[-1], 1000)\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.\n params.valuesdict(), **kwargs), c=color, label=label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax\n",
"step-5": "\"\"\"After seeing how great the lmfit package, I was inspired to create my own\nobject using it. This acts as a fitting template. \n\"\"\"\n##-------------------------------PREAMBLE-----------------------------------##\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom lmfit import minimize, Parameters, fit_report \nimport logging \n\n##-------------------------------CLASS DEFINITION-----------------------------------##\n\nclass FitTemplate(): \n def __init__(self, fit_function, log_dir = None):\n self.fit_function = fit_function \n self.parameters = Parameters()\n self.fit_result = None\n\n #setup logging. warning level is standard and is sent to stdout. info is requested by log_dir argument,\n #and is printed to log file\n if log_dir is not None: \n logging.basicConfig(filename=log_dir +'log.log', level=logging.INFO)\n else:\n logging.basicConfig(level=logging.CRITICAL)\n \n\n def residuals_wrapper(self, parameters, x, data,weights,**kwargs):\n model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)\n return ((model_values - data)*weights)**2\n \n def do_minimisation(self, x, data, weights = 1, **kwargs):\n self.fit_result = minimize(self.residuals_wrapper, self.parameters, args = (x, data, weights), kws = kwargs)\n logging.info('Fit Result')\n logging.info('==========')\n return self.fit_result\n\n def get_opt_parameters(self):\n if self.fit_result is None: \n raise ValueError(\"No fit result! Do a fit before asking for\")\n return self.fit_result.params.valuesdict()\n\n def print_parameters(self):\n self.parameters.pretty_print() \n \n def print_fit_result(self):\n logging.info((fit_report(self.fit_result)))\n print(fit_report(self.fit_result))\n\n def plot_fit(self, x, y, xlabel = None, ylabel = None, title = None, errorbars = None, label = None, ax = None, c = None, colour_index = None, **kwargs): \n\n if ax is None:\n _, ax = plt.subplots(1\t,1, constrained_layout=True, figsize=(18, 9))\n plt.rcParams.update({'font.size': 16}) \n colours = ['b','m','c','r','tab:orange', 'tab:pink']\n\n #decide colour \n if c is not None: \n color = c \n elif colour_index is not None: \n color = colours[colour_index]\n else: \n color = colours[0]\n\n #scatter plot\n ax.scatter(x, y, color = color)\n #plot errors\n if errorbars is not None:\n ax.errorbar(x, y, errorbars, ls = 'none', c = color, capsize = 3)\n #plot model\n fitdomain = np.linspace(x[0], x[-1], 1000)\t\n ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.params.valuesdict(), **kwargs), c = color, label = label)\n plt.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n plt.grid()\n return ax \n \n \t\t\n \n \n \n\n\n\n\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
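The row above concatenates five progressively unmasked reconstructions of the same lmfit wrapper, ending with the original file. A minimal usage sketch of that `FitTemplate` class, assuming it is in scope as defined above; the model `linear_model` and the toy data are illustrative assumptions, not part of the dataset:

```python
import numpy as np

# Hypothetical model with the signature FitTemplate expects:
# x plus the dict produced by Parameters.valuesdict().
def linear_model(x, params):
    return params['m'] * x + params['c']

fitter = FitTemplate(linear_model)
fitter.parameters.add('m', value=1.0)  # initial guesses for the fit
fitter.parameters.add('c', value=0.0)

# Toy data: a noisy line with slope 3 and intercept 1.5.
xs = np.linspace(0, 10, 50)
ys = 3.0 * xs + 1.5 + np.random.normal(scale=0.5, size=xs.size)

result = fitter.do_minimisation(xs, ys)
print(fitter.get_opt_parameters())  # roughly {'m': 3.0, 'c': 1.5}
```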
import uuid
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.profile import region_provider

# Note: do not change this import path
from celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest


class SendMes(object):
    REGION = "cn-hangzhou"
    PRODUCT_NAME = "Dysmsapi"
    DOMAIN = "dysmsapi.aliyuncs.com"

    # The ACCESS_KEY_ID and ACCESS_KEY_SECRET issued for this application
    ACCESS_KEY_ID = "LTAIYEeWFSUAFcYy"
    ACCESS_KEY_SECRET = "FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY"

    acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)
    region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)

    def send_2_mes(self, phone_numbers, code):
        # The registered SMS signature and SMS template
        sign_name = 'SpiritBlog'
        template_code = 'SMS_137657397'
        business_id = uuid.uuid1()
        template_param = '{"code":"%s"}' % code
        smsRequest = SendSmsRequest.SendSmsRequest()
        # The registered SMS template code (required)
        smsRequest.set_TemplateCode(template_code)

        # SMS template variable parameters
        if template_param is not None:
            smsRequest.set_TemplateParam(template_param)

        # Set the business request serial number (required)
        smsRequest.set_OutId(business_id)

        # SMS signature
        smsRequest.set_SignName(sign_name)

        # List of phone numbers to send to (required)
        smsRequest.set_PhoneNumbers(phone_numbers)

        # Call the SMS sending API; returns JSON
        smsResponse = self.acs_client.do_action_with_exception(smsRequest)
        return smsResponse

# sm = SendMes()
# sm.send_2_mes(15071176826, 333333)
|
normal
|
{
"blob_id": "daecbf5280c199b31f3b9d9818df245d9cd165a7",
"index": 4295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SendMes(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SendMes(object):\n REGION = 'cn-hangzhou'\n PRODUCT_NAME = 'Dysmsapi'\n DOMAIN = 'dysmsapi.aliyuncs.com'\n ACCESS_KEY_ID = 'LTAIYEeWFSUAFcYy'\n ACCESS_KEY_SECRET = 'FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY'\n acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)\n region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n\n def send_2_mes(self, phone_numbers, code):\n sign_name = 'SpiritBlog'\n template_code = 'SMS_137657397'\n business_id = uuid.uuid1()\n template_param = '{\"code\":\"%s\"}' % code\n smsRequest = SendSmsRequest.SendSmsRequest()\n smsRequest.set_TemplateCode(template_code)\n if template_param is not None:\n smsRequest.set_TemplateParam(template_param)\n smsRequest.set_OutId(business_id)\n smsRequest.set_SignName(sign_name)\n smsRequest.set_PhoneNumbers(phone_numbers)\n smsResponse = self.acs_client.do_action_with_exception(smsRequest)\n return smsResponse\n",
"step-4": "import uuid\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.profile import region_provider\nfrom celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\n\n\nclass SendMes(object):\n REGION = 'cn-hangzhou'\n PRODUCT_NAME = 'Dysmsapi'\n DOMAIN = 'dysmsapi.aliyuncs.com'\n ACCESS_KEY_ID = 'LTAIYEeWFSUAFcYy'\n ACCESS_KEY_SECRET = 'FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY'\n acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)\n region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n\n def send_2_mes(self, phone_numbers, code):\n sign_name = 'SpiritBlog'\n template_code = 'SMS_137657397'\n business_id = uuid.uuid1()\n template_param = '{\"code\":\"%s\"}' % code\n smsRequest = SendSmsRequest.SendSmsRequest()\n smsRequest.set_TemplateCode(template_code)\n if template_param is not None:\n smsRequest.set_TemplateParam(template_param)\n smsRequest.set_OutId(business_id)\n smsRequest.set_SignName(sign_name)\n smsRequest.set_PhoneNumbers(phone_numbers)\n smsResponse = self.acs_client.do_action_with_exception(smsRequest)\n return smsResponse\n",
"step-5": "import uuid\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.profile import region_provider\n\n\n# 注意:不要更改\nfrom celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\n\n\nclass SendMes(object):\n\tREGION = \"cn-hangzhou\"\n\tPRODUCT_NAME = \"Dysmsapi\"\n\tDOMAIN = \"dysmsapi.aliyuncs.com\"\n\n\t# 申请的ACCESS_KEY_ID和ACCESS_KEY_SECRET\n\tACCESS_KEY_ID = \"LTAIYEeWFSUAFcYy\"\n\tACCESS_KEY_SECRET = \"FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY\"\n\n\tacs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)\n\tregion_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n\n\tdef send_2_mes(self, phone_numbers, code):\n\t\t# 申请的短信签名 和 短信模板\n\t\tsign_name = 'SpiritBlog'\n\t\ttemplate_code = 'SMS_137657397'\n\t\tbusiness_id = uuid.uuid1()\n\t\ttemplate_param = '{\"code\":\"%s\"}' % code\n\t\tsmsRequest = SendSmsRequest.SendSmsRequest()\n\t\t# 申请的短信模板编码,必填\n\t\tsmsRequest.set_TemplateCode(template_code)\n\n\t\t# 短信模板变量参数\n\t\tif template_param is not None:\n\t\t\tsmsRequest.set_TemplateParam(template_param)\n\n\t\t# 设置业务请求流水号,必填。\n\t\tsmsRequest.set_OutId(business_id)\n\n\t\t# 短信签名\n\t\tsmsRequest.set_SignName(sign_name)\n\n\t\t# 短信发送的号码列表,必填。\n\t\tsmsRequest.set_PhoneNumbers(phone_numbers)\n\n\t\t# 调用短信发送接口,返回json\n\t\tsmsResponse = self.acs_client.do_action_with_exception(smsRequest)\n\t\treturn smsResponse\n\n# sm = SendMes()\n# sm.send_2_mes(15071176826, 333333)\n",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
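One hardening note on the Aliyun SMS row above: `template_param` is built with `%`-style string formatting, which produces invalid JSON if the code value ever contains a quote or backslash. A hedged alternative sketch using the standard library's `json.dumps` (the helper name is mine, not part of the row):

```python
import json

def build_template_param(code):
    # json.dumps escapes quotes, backslashes and non-ASCII safely, while
    # producing the same '{"code": "..."}' shape the SMS template expects.
    return json.dumps({"code": str(code)})

print(build_template_param(333333))  # -> {"code": "333333"}
```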
from flask import Flask, render_template, url_for, request, redirect, session, flash
import os, json
from usuarios import crearUsuario, comprobarUsuario
from busqueda import filtrado
from compra import procesarCompra, Dinero

app = Flask(__name__)

catalogo_data = json.loads(open(os.path.join(app.root_path, 'json/catalogo.json')).read())
peliculas = catalogo_data['peliculas']

# Route for the application's index page
@app.route("/", methods = ['GET', 'POST'])
def index():
    if request.method == 'POST':
        peliculasFiltradas = filtrado(peliculas, request)
        return render_template('index.html', peliculas = peliculasFiltradas)
    else:
        return render_template('index.html', peliculas = peliculas)

# Route for a film's information page
@app.route("/info/<pelicula>", methods = ['GET', 'POST'])
def informacionPelicula(pelicula):
    peliculas = catalogo_data['peliculas']
    if request.method == 'POST':
        return redirect(url_for('index'), code = 307)
    for aux in peliculas:
        if aux['titulo'] == pelicula.replace("%20", " "):
            return render_template('informacion-pelicula.html', pelicula = aux)
    return redirect(url_for('index'))

# Route for the login HTML page
@app.route("/login", methods = ['GET', 'POST'])
def login():
    if 'user' in session:
        return redirect(url_for('index'))
    else:
        if request.method == 'POST':
            try:
                comprobarUsuario(request.form['nombre-usuario'], request.form['contrasenia'])
                session['user'] = request.form['nombre-usuario']
                if 'last_user' in session:
                    print(session['last_user'])
                    if session['last_user'] == session['user']:
                        if ('last_carrito' in session) and ('last_precio' in session):
                            session['carrito'] = session['last_carrito']
                            session['precio'] = session['last_precio']
                return redirect(url_for('index'))
            except Exception as error:
                return render_template('login.html', error = error)
        else:
            return render_template('login.html')

# Route for the registration HTML page
@app.route("/register", methods = ['GET', 'POST'])
def register():
    if 'user' in session:
        return redirect(url_for('index'))
    else:
        if request.method == 'POST':
            try:
                crearUsuario(request.form['cuestionario_nombre'], request.form['cuestionario_nombreCompleto'], request.form['cuestionario_contrasenia'], request.form['cuestionario_correo'], request.form['cuestionario_cuenta'])
                session['user'] = request.form['cuestionario_nombre']
                return redirect(url_for('index'))
            except Exception as error:
                return render_template('register.html', error = error)
        else:
            return render_template('register.html')

# Route to log out
@app.route('/logout')
def logout():
    if 'user' in session:
        if ('carrito' in session) and ('precio' in session):
            session['last_user'] = session['user']
            session['last_carrito'] = session['carrito']
            session['last_precio'] = session['precio']
        session.pop('user', None)
        session.pop('carrito', None)
        session.pop('precio', None)
        return redirect(url_for('index'))
    else:
        return redirect(url_for('index'))

# Route to view the shopping cart
@app.route('/carrito', methods = ['GET', 'POST'])
def carrito():
    if request.method == 'POST':
        return redirect(url_for('index'), code = 307)
    else:
        return render_template('carrito.html', peliculas = peliculas)

# GET method to add a film to the shopping cart
@app.route("/info/<pelicula>/aniadir-carrito", methods = ['GET'])
def aniadir(pelicula):
    if 'precio' not in session:
        session['precio'] = 0
    for aux in peliculas:
        if aux['titulo'] == pelicula.replace("%20", " "):
            if 'carrito' in session:
                for deter in session['carrito']:
                    if deter['nombre'] == pelicula.replace("%20", " "):
                        deter['cantidad'] = deter['cantidad'] + 1
                        session['precio'] = session['precio'] + aux['precio']
                        return redirect(url_for('index'))
                add = {"nombre": pelicula.replace("%20", " "), "cantidad": 1}
                session['carrito'].append(add)
                session['precio'] = session['precio'] + aux['precio']
                return redirect(url_for('index'))
            else:
                session['carrito'] = []
                add = {"nombre": pelicula.replace("%20", " "), "cantidad": 1}
                session['carrito'].append(add)
                session['precio'] = session['precio'] + aux['precio']
                return redirect(url_for('index'))
    return redirect(url_for('index'))

# GET method to remove a film from the shopping cart
@app.route("/info/<pelicula>/del-carrito", methods = ['GET'])
def delete(pelicula):
    if 'precio' not in session:
        return redirect(url_for('carrito'))
    for aux in peliculas:
        if aux['titulo'] == pelicula.replace("%20", " "):
            if 'carrito' in session:
                # Remove the film from the cart
                i = 0
                for deter in session['carrito']:
                    if deter['nombre'] == pelicula.replace("%20", " "):
                        if deter['cantidad'] == 1:
                            del(session['carrito'][i])
                        else:
                            deter['cantidad'] = deter['cantidad'] - 1
                    i = i + 1

                # Update the cart total
                if len(session['carrito']) == 0:
                    session['precio'] = 0
                else:
                    session['precio'] = session['precio'] - aux['precio']
                    if session['precio'] < 0:
                        session['precio'] = 0
            else:
                return redirect(url_for('carrito'))
    return redirect(url_for('carrito'))

# Shopping cart confirmation page
@app.route("/confirmacion", methods = ['GET', 'POST'])
def confirmacion():
    if 'user' in session:
        if ('carrito' in session) and (len(session['carrito']) > 0):
            return render_template('confirmacion.html', peliculas = peliculas)
        else:
            return redirect(url_for('index'))
    else:
        return redirect(url_for('index'))

@app.route("/confirmacion/push")
def push():
    try:
        procesarCompra(session, peliculas)
        session.pop('carrito', None)
        session.pop('precio', None)
        return redirect(url_for('index'))
    except Dinero as error:
        flash("Parece que no tienes dinero suficiente")
        return redirect(url_for('index'))
    except Exception as error:
        flash(error)
        return redirect(url_for('index', error = error))

# Route for a user's purchase history
@app.route("/historial", methods = ['GET', 'POST'])
def historial():
    if 'user' not in session:
        return redirect(url_for('index'))
    else:
        PATH = 'usuarios/' + session['user'] + '/historial.json'
        historial = json.loads(open(PATH).read())

        if request.method == 'POST':
            return redirect(url_for('index'), code = 307)
        else:
            return render_template('historial.html', historial = historial)

# Keep cookies after the browser is closed
@app.before_request
def session_management():
    session.permanent = True

if __name__ == "__main__":
    app.secret_key = os.urandom(24)
    app.run(host='0.0.0.0', port=5001, debug=True)
|
normal
|
{
"blob_id": "ebb4cf1ec2baa7bd0d29e3ae88b16e65cf76a88a",
"index": 3679,
"step-1": "<mask token>\n\n\n@app.route('/info/<pelicula>', methods=['GET', 'POST'])\ndef informacionPelicula(pelicula):\n peliculas = catalogo_data['peliculas']\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n return render_template('informacion-pelicula.html', pelicula=aux)\n return redirect(url_for('index'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n comprobarUsuario(request.form['nombre-usuario'], request.form[\n 'contrasenia'])\n session['user'] = request.form['nombre-usuario']\n if 'last_user' in session:\n print(session['last_user'])\n if session['last_user'] == session['user']:\n if 'last_carrito' in session and 'last_precio' in session:\n session['carrito'] = session['last_carrito']\n session['precio'] = session['last_precio']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('login.html', error=error)\n else:\n return render_template('login.html')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n crearUsuario(request.form['cuestionario_nombre'], request.form[\n 'cuestionario_nombreCompleto'], request.form[\n 'cuestionario_contrasenia'], request.form[\n 'cuestionario_correo'], request.form['cuestionario_cuenta'])\n session['user'] = request.form['cuestionario_nombre']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('register.html', error=error)\n else:\n return render_template('register.html')\n\n\n@app.route('/logout')\ndef logout():\n if 'user' in session:\n if 'carrito' in session and 'precio' in session:\n session['last_user'] = session['user']\n session['last_carrito'] = session['carrito']\n session['last_precio'] = session['precio']\n session.pop('user', None)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n<mask token>\n\n\n@app.route('/info/<pelicula>/aniadir-carrito', methods=['GET'])\ndef aniadir(pelicula):\n if 'precio' not in session:\n session['precio'] = 0\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n deter['cantidad'] = deter['cantidad'] + 1\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n add = {'nombre': pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n else:\n session['carrito'] = []\n add = {'nombre': pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n return redirect(url_for('index'))\n\n\n@app.route('/info/<pelicula>/del-carrito', methods=['GET'])\ndef delete(pelicula):\n if 'precio' not in session:\n return redirect(url_for('carrito'))\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n i = 0\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n if deter['cantidad'] == 1:\n del session['carrito'][i]\n else:\n 
deter['cantidad'] = deter['cantidad'] - 1\n i = i + 1\n if len(session['carrito']) == 0:\n session['precio'] = 0\n else:\n session['precio'] = session['precio'] - aux['precio']\n if session['precio'] < 0:\n session['precio'] = 0\n else:\n return redirect(url_for('carrito'))\n return redirect(url_for('carrito'))\n\n\n@app.route('/confirmacion', methods=['GET', 'POST'])\ndef confirmacion():\n if 'user' in session:\n if 'carrito' in session and len(session['carrito']) > 0:\n return render_template('confirmacion.html', peliculas=peliculas)\n else:\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n@app.route('/confirmacion/push')\ndef push():\n try:\n procesarCompra(session, peliculas)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n except Dinero as error:\n flash('Parece que no tienes dinero suficiente')\n return redirect(url_for('index'))\n except Exception as error:\n flash(error)\n return redirect(url_for('index', error=error))\n\n\n<mask token>\n\n\n@app.before_request\ndef session_management():\n session.permanent = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n peliculasFiltradas = filtrado(peliculas, request)\n return render_template('index.html', peliculas=peliculasFiltradas)\n else:\n return render_template('index.html', peliculas=peliculas)\n\n\n@app.route('/info/<pelicula>', methods=['GET', 'POST'])\ndef informacionPelicula(pelicula):\n peliculas = catalogo_data['peliculas']\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n return render_template('informacion-pelicula.html', pelicula=aux)\n return redirect(url_for('index'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n comprobarUsuario(request.form['nombre-usuario'], request.form[\n 'contrasenia'])\n session['user'] = request.form['nombre-usuario']\n if 'last_user' in session:\n print(session['last_user'])\n if session['last_user'] == session['user']:\n if 'last_carrito' in session and 'last_precio' in session:\n session['carrito'] = session['last_carrito']\n session['precio'] = session['last_precio']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('login.html', error=error)\n else:\n return render_template('login.html')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n crearUsuario(request.form['cuestionario_nombre'], request.form[\n 'cuestionario_nombreCompleto'], request.form[\n 'cuestionario_contrasenia'], request.form[\n 'cuestionario_correo'], request.form['cuestionario_cuenta'])\n session['user'] = request.form['cuestionario_nombre']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('register.html', error=error)\n else:\n return render_template('register.html')\n\n\n@app.route('/logout')\ndef logout():\n if 'user' in session:\n if 'carrito' in session and 'precio' in session:\n session['last_user'] = session['user']\n session['last_carrito'] = session['carrito']\n session['last_precio'] = session['precio']\n session.pop('user', None)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n@app.route('/carrito', methods=['GET', 'POST'])\ndef carrito():\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n else:\n return render_template('carrito.html', peliculas=peliculas)\n\n\n@app.route('/info/<pelicula>/aniadir-carrito', methods=['GET'])\ndef aniadir(pelicula):\n if 'precio' not in session:\n session['precio'] = 0\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n deter['cantidad'] = deter['cantidad'] + 1\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n add = {'nombre': pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n else:\n session['carrito'] = []\n add = {'nombre': pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return 
redirect(url_for('index'))\n return redirect(url_for('index'))\n\n\n@app.route('/info/<pelicula>/del-carrito', methods=['GET'])\ndef delete(pelicula):\n if 'precio' not in session:\n return redirect(url_for('carrito'))\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n i = 0\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n if deter['cantidad'] == 1:\n del session['carrito'][i]\n else:\n deter['cantidad'] = deter['cantidad'] - 1\n i = i + 1\n if len(session['carrito']) == 0:\n session['precio'] = 0\n else:\n session['precio'] = session['precio'] - aux['precio']\n if session['precio'] < 0:\n session['precio'] = 0\n else:\n return redirect(url_for('carrito'))\n return redirect(url_for('carrito'))\n\n\n@app.route('/confirmacion', methods=['GET', 'POST'])\ndef confirmacion():\n if 'user' in session:\n if 'carrito' in session and len(session['carrito']) > 0:\n return render_template('confirmacion.html', peliculas=peliculas)\n else:\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n@app.route('/confirmacion/push')\ndef push():\n try:\n procesarCompra(session, peliculas)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n except Dinero as error:\n flash('Parece que no tienes dinero suficiente')\n return redirect(url_for('index'))\n except Exception as error:\n flash(error)\n return redirect(url_for('index', error=error))\n\n\n@app.route('/historial', methods=['GET', 'POST'])\ndef historial():\n if 'user' not in session:\n return redirect(url_for('index'))\n else:\n PATH = 'usuarios/' + session['user'] + '/historial.json'\n historial = json.loads(open(PATH).read())\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n else:\n return render_template('historial.html', historial=historial)\n\n\n@app.before_request\ndef session_management():\n session.permanent = True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n peliculasFiltradas = filtrado(peliculas, request)\n return render_template('index.html', peliculas=peliculasFiltradas)\n else:\n return render_template('index.html', peliculas=peliculas)\n\n\n@app.route('/info/<pelicula>', methods=['GET', 'POST'])\ndef informacionPelicula(pelicula):\n peliculas = catalogo_data['peliculas']\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n return render_template('informacion-pelicula.html', pelicula=aux)\n return redirect(url_for('index'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n comprobarUsuario(request.form['nombre-usuario'], request.form[\n 'contrasenia'])\n session['user'] = request.form['nombre-usuario']\n if 'last_user' in session:\n print(session['last_user'])\n if session['last_user'] == session['user']:\n if 'last_carrito' in session and 'last_precio' in session:\n session['carrito'] = session['last_carrito']\n session['precio'] = session['last_precio']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('login.html', error=error)\n else:\n return render_template('login.html')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n crearUsuario(request.form['cuestionario_nombre'], request.form[\n 'cuestionario_nombreCompleto'], request.form[\n 'cuestionario_contrasenia'], request.form[\n 'cuestionario_correo'], request.form['cuestionario_cuenta'])\n session['user'] = request.form['cuestionario_nombre']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('register.html', error=error)\n else:\n return render_template('register.html')\n\n\n@app.route('/logout')\ndef logout():\n if 'user' in session:\n if 'carrito' in session and 'precio' in session:\n session['last_user'] = session['user']\n session['last_carrito'] = session['carrito']\n session['last_precio'] = session['precio']\n session.pop('user', None)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n@app.route('/carrito', methods=['GET', 'POST'])\ndef carrito():\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n else:\n return render_template('carrito.html', peliculas=peliculas)\n\n\n@app.route('/info/<pelicula>/aniadir-carrito', methods=['GET'])\ndef aniadir(pelicula):\n if 'precio' not in session:\n session['precio'] = 0\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n deter['cantidad'] = deter['cantidad'] + 1\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n add = {'nombre': pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n else:\n session['carrito'] = []\n add = {'nombre': pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return 
redirect(url_for('index'))\n return redirect(url_for('index'))\n\n\n@app.route('/info/<pelicula>/del-carrito', methods=['GET'])\ndef delete(pelicula):\n if 'precio' not in session:\n return redirect(url_for('carrito'))\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n i = 0\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n if deter['cantidad'] == 1:\n del session['carrito'][i]\n else:\n deter['cantidad'] = deter['cantidad'] - 1\n i = i + 1\n if len(session['carrito']) == 0:\n session['precio'] = 0\n else:\n session['precio'] = session['precio'] - aux['precio']\n if session['precio'] < 0:\n session['precio'] = 0\n else:\n return redirect(url_for('carrito'))\n return redirect(url_for('carrito'))\n\n\n@app.route('/confirmacion', methods=['GET', 'POST'])\ndef confirmacion():\n if 'user' in session:\n if 'carrito' in session and len(session['carrito']) > 0:\n return render_template('confirmacion.html', peliculas=peliculas)\n else:\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n@app.route('/confirmacion/push')\ndef push():\n try:\n procesarCompra(session, peliculas)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n except Dinero as error:\n flash('Parece que no tienes dinero suficiente')\n return redirect(url_for('index'))\n except Exception as error:\n flash(error)\n return redirect(url_for('index', error=error))\n\n\n@app.route('/historial', methods=['GET', 'POST'])\ndef historial():\n if 'user' not in session:\n return redirect(url_for('index'))\n else:\n PATH = 'usuarios/' + session['user'] + '/historial.json'\n historial = json.loads(open(PATH).read())\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n else:\n return render_template('historial.html', historial=historial)\n\n\n@app.before_request\ndef session_management():\n session.permanent = True\n\n\nif __name__ == '__main__':\n app.secret_key = os.urandom(24)\n app.run(host='0.0.0.0', port=5001, debug=True)\n",
"step-4": "from flask import Flask, render_template, url_for, request, redirect, session, flash\nimport os, json\nfrom usuarios import crearUsuario, comprobarUsuario\nfrom busqueda import filtrado\nfrom compra import procesarCompra, Dinero\napp = Flask(__name__)\ncatalogo_data = json.loads(open(os.path.join(app.root_path,\n 'json/catalogo.json')).read())\npeliculas = catalogo_data['peliculas']\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n peliculasFiltradas = filtrado(peliculas, request)\n return render_template('index.html', peliculas=peliculasFiltradas)\n else:\n return render_template('index.html', peliculas=peliculas)\n\n\n@app.route('/info/<pelicula>', methods=['GET', 'POST'])\ndef informacionPelicula(pelicula):\n peliculas = catalogo_data['peliculas']\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n return render_template('informacion-pelicula.html', pelicula=aux)\n return redirect(url_for('index'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n comprobarUsuario(request.form['nombre-usuario'], request.form[\n 'contrasenia'])\n session['user'] = request.form['nombre-usuario']\n if 'last_user' in session:\n print(session['last_user'])\n if session['last_user'] == session['user']:\n if 'last_carrito' in session and 'last_precio' in session:\n session['carrito'] = session['last_carrito']\n session['precio'] = session['last_precio']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('login.html', error=error)\n else:\n return render_template('login.html')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if 'user' in session:\n return redirect(url_for('index'))\n elif request.method == 'POST':\n try:\n crearUsuario(request.form['cuestionario_nombre'], request.form[\n 'cuestionario_nombreCompleto'], request.form[\n 'cuestionario_contrasenia'], request.form[\n 'cuestionario_correo'], request.form['cuestionario_cuenta'])\n session['user'] = request.form['cuestionario_nombre']\n return redirect(url_for('index'))\n except Exception as error:\n return render_template('register.html', error=error)\n else:\n return render_template('register.html')\n\n\n@app.route('/logout')\ndef logout():\n if 'user' in session:\n if 'carrito' in session and 'precio' in session:\n session['last_user'] = session['user']\n session['last_carrito'] = session['carrito']\n session['last_precio'] = session['precio']\n session.pop('user', None)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n@app.route('/carrito', methods=['GET', 'POST'])\ndef carrito():\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n else:\n return render_template('carrito.html', peliculas=peliculas)\n\n\n@app.route('/info/<pelicula>/aniadir-carrito', methods=['GET'])\ndef aniadir(pelicula):\n if 'precio' not in session:\n session['precio'] = 0\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n deter['cantidad'] = deter['cantidad'] + 1\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n add = {'nombre': 
pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n else:\n session['carrito'] = []\n add = {'nombre': pelicula.replace('%20', ' '), 'cantidad': 1}\n session['carrito'].append(add)\n session['precio'] = session['precio'] + aux['precio']\n return redirect(url_for('index'))\n return redirect(url_for('index'))\n\n\n@app.route('/info/<pelicula>/del-carrito', methods=['GET'])\ndef delete(pelicula):\n if 'precio' not in session:\n return redirect(url_for('carrito'))\n for aux in peliculas:\n if aux['titulo'] == pelicula.replace('%20', ' '):\n if 'carrito' in session:\n i = 0\n for deter in session['carrito']:\n if deter['nombre'] == pelicula.replace('%20', ' '):\n if deter['cantidad'] == 1:\n del session['carrito'][i]\n else:\n deter['cantidad'] = deter['cantidad'] - 1\n i = i + 1\n if len(session['carrito']) == 0:\n session['precio'] = 0\n else:\n session['precio'] = session['precio'] - aux['precio']\n if session['precio'] < 0:\n session['precio'] = 0\n else:\n return redirect(url_for('carrito'))\n return redirect(url_for('carrito'))\n\n\n@app.route('/confirmacion', methods=['GET', 'POST'])\ndef confirmacion():\n if 'user' in session:\n if 'carrito' in session and len(session['carrito']) > 0:\n return render_template('confirmacion.html', peliculas=peliculas)\n else:\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n\n\n@app.route('/confirmacion/push')\ndef push():\n try:\n procesarCompra(session, peliculas)\n session.pop('carrito', None)\n session.pop('precio', None)\n return redirect(url_for('index'))\n except Dinero as error:\n flash('Parece que no tienes dinero suficiente')\n return redirect(url_for('index'))\n except Exception as error:\n flash(error)\n return redirect(url_for('index', error=error))\n\n\n@app.route('/historial', methods=['GET', 'POST'])\ndef historial():\n if 'user' not in session:\n return redirect(url_for('index'))\n else:\n PATH = 'usuarios/' + session['user'] + '/historial.json'\n historial = json.loads(open(PATH).read())\n if request.method == 'POST':\n return redirect(url_for('index'), code=307)\n else:\n return render_template('historial.html', historial=historial)\n\n\n@app.before_request\ndef session_management():\n session.permanent = True\n\n\nif __name__ == '__main__':\n app.secret_key = os.urandom(24)\n app.run(host='0.0.0.0', port=5001, debug=True)\n",
"step-5": "from flask import Flask, render_template, url_for, request, redirect, session, flash\nimport os, json\nfrom usuarios import crearUsuario, comprobarUsuario\nfrom busqueda import filtrado\nfrom compra import procesarCompra, Dinero\n\napp = Flask(__name__)\n\ncatalogo_data = json.loads(open(os.path.join(app.root_path,'json/catalogo.json')).read())\npeliculas = catalogo_data['peliculas']\n\n# Ruta de la pagina index de la aplicacion\n@app.route(\"/\", methods = ['GET','POST'])\ndef index():\n\tif(request.method == 'POST'):\n\t\tpeliculasFiltradas = filtrado(peliculas, request)\n\t\treturn render_template('index.html', peliculas = peliculasFiltradas)\n\telse:\n\t\treturn render_template('index.html', peliculas = peliculas)\n\n# Ruta de la informacion de una pelicula\n@app.route(\"/info/<pelicula>\", methods = ['GET', 'POST'])\ndef informacionPelicula(pelicula):\n\tpeliculas = catalogo_data['peliculas']\n\tif(request.method == 'POST'):\n\t\treturn redirect(url_for('index'), code = 307)\n\tfor aux in peliculas:\n\t\tif aux['titulo'] == pelicula.replace(\"%20\", \" \"):\n\t\t\treturn render_template('informacion-pelicula.html', pelicula = aux)\n\treturn redirect(url_for('index'))\n\n# Ruta de la pagina html de login\n@app.route(\"/login\", methods=['GET','POST'])\ndef login():\n\tif ('user' in session):\n\t\treturn redirect(url_for('index'))\n\telse:\n\t\tif(request.method == 'POST'):\n\t\t\ttry:\n\t\t\t\tcomprobarUsuario(request.form['nombre-usuario'], request.form['contrasenia'])\n\t\t\t\tsession['user'] = request.form['nombre-usuario']\n\t\t\t\tif 'last_user' in session:\n\t\t\t\t\tprint(session['last_user'])\n\t\t\t\t\tif session['last_user'] == session['user']:\n\t\t\t\t\t\tif (('last_carrito' in session) and ('last_precio' in session)):\n\t\t\t\t\t\t\tsession['carrito'] = session['last_carrito']\n\t\t\t\t\t\t\tsession['precio'] = session['last_precio']\n\t\t\t\treturn redirect(url_for('index'))\n\t\t\texcept Exception as error:\n\t\t\t\treturn render_template('login.html', error = error)\n\t\telse:\n\t\t\treturn render_template('login.html')\n\n# Ruta de la pagina html de registro\n@app.route(\"/register\", methods = ['GET', 'POST'])\ndef register():\n\tif ('user' in session):\n\t\treturn redirect(url_for('index'))\n\telse:\n\t\tif(request.method == 'POST'):\n\t\t\ttry:\n\t\t\t\tcrearUsuario(request.form['cuestionario_nombre'], request.form['cuestionario_nombreCompleto'], request.form['cuestionario_contrasenia'], request.form['cuestionario_correo'], request.form['cuestionario_cuenta'])\n\t\t\t\tsession['user'] = request.form['cuestionario_nombre']\n\t\t\t\treturn redirect(url_for('index'))\n\t\t\texcept Exception as error:\n\t\t\t\treturn render_template('register.html', error = error)\n\t\telse:\n\t\t\treturn render_template('register.html')\n\n#Ruta para cerrar sesion\n@app.route('/logout')\ndef logout():\n\tif('user' in session):\n\t\tif(('carrito' in session) and ('precio' in session)):\n\t\t\tsession['last_user'] = session['user']\n\t\t\tsession['last_carrito'] = session['carrito']\n\t\t\tsession['last_precio'] = session['precio']\n\t\tsession.pop('user', None)\n\t\tsession.pop('carrito', None)\n\t\tsession.pop('precio', None)\n\t\treturn redirect(url_for('index'))\n\telse:\n\t\treturn redirect(url_for('index'))\n\n#Ruta para ver el carrito\n@app.route('/carrito', methods = ['GET', 'POST'])\ndef carrito():\n\tif(request.method == 'POST'):\n\t\treturn redirect(url_for('index'), code = 307)\n\telse:\n\t\treturn render_template('carrito.html', peliculas = peliculas)\n\n#Metodo 
get para aniadir una pelicula al carrito de compra\n@app.route(\"/info/<pelicula>/aniadir-carrito\", methods = ['GET'])\ndef aniadir(pelicula):\n\tif 'precio' not in session:\n\t\tsession['precio'] = 0\n\tfor aux in peliculas:\n\t\tif aux['titulo'] == pelicula.replace(\"%20\", \" \"):\n\t\t\tif 'carrito' in session:\n\t\t\t\tfor deter in session['carrito']:\n\t\t\t\t\tif(deter['nombre'] == pelicula.replace(\"%20\", \" \")):\n\t\t\t\t\t\tdeter['cantidad'] = deter['cantidad'] + 1\n\t\t\t\t\t\tsession['precio'] = session['precio'] + aux['precio']\n\t\t\t\t\t\treturn redirect(url_for('index'))\n\t\t\t\tadd = {\"nombre\" : pelicula.replace(\"%20\", \" \"), \"cantidad\": 1}\n\t\t\t\tsession['carrito'].append(add)\n\t\t\t\tsession['precio'] = session['precio'] + aux['precio']\n\t\t\t\treturn redirect(url_for('index'))\n\t\t\telse:\n\t\t\t\tsession['carrito'] = []\n\t\t\t\tadd = {\"nombre\" : pelicula.replace(\"%20\", \" \"), \"cantidad\": 1}\n\t\t\t\tsession['carrito'].append(add)\n\t\t\t\tsession['precio'] = session['precio'] + aux['precio']\n\t\t\t\treturn redirect(url_for('index'))\n\treturn redirect(url_for('index'))\n\n#Metodo get para eliminar una pelicula del carrito de compra\n@app.route(\"/info/<pelicula>/del-carrito\", methods = ['GET'])\ndef delete(pelicula):\n\tif 'precio' not in session:\n\t\treturn redirect(url_for('carrito'))\n\tfor aux in peliculas:\n\t\tif aux['titulo'] == pelicula.replace(\"%20\", \" \"):\n\t\t\tif 'carrito' in session:\n\t\t\t\t#Quitar pelicula del carrito\n\t\t\t\ti = 0\n\t\t\t\tfor deter in session['carrito']:\n\t\t\t\t\tif(deter['nombre'] == pelicula.replace(\"%20\", \" \")):\n\t\t\t\t\t\tif(deter['cantidad'] == 1):\n\t\t\t\t\t\t\tdel(session['carrito'][i])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdeter['cantidad'] = deter['cantidad'] - 1\n\t\t\t\t\ti = i + 1\n\n\t\t\t\t#Modificar dinero del carrito\n\t\t\t\tif (len(session['carrito']) == 0):\n\t\t\t\t\tsession['precio'] = 0\n\t\t\t\telse:\n\t\t\t\t\tsession['precio'] = session['precio'] - aux['precio']\n\t\t\t\t\tif(session['precio'] < 0):\n\t\t\t\t\t\tsession['precio'] = 0\n\t\t\telse:\n\t\t\t\treturn redirect(url_for('carrito'))\n\treturn redirect(url_for('carrito'))\n\n#Pagina de cofirmacion del carrito de compra\n@app.route(\"/confirmacion\", methods = ['GET', 'POST'])\ndef confirmacion():\n\tif('user' in session):\n\t\tif(('carrito' in session) and (len(session['carrito']) > 0)):\n\t\t\treturn render_template('confirmacion.html', peliculas = peliculas)\n\t\telse:\n\t\t\treturn redirect(url_for('index'))\n\telse:\n\t\treturn redirect(url_for('index'))\n\n@app.route(\"/confirmacion/push\")\ndef push():\n\ttry:\n\t\tprocesarCompra(session, peliculas)\n\t\tsession.pop('carrito', None)\n\t\tsession.pop('precio', None)\n\t\treturn redirect(url_for('index'))\n\texcept Dinero as error:\n\t\tflash(\"Parece que no tienes dinero suficiente\")\n\t\treturn redirect(url_for('index'))\n\texcept Exception as error:\n\t\tflash(error)\n\t\treturn redirect(url_for('index', error = error))\n\n# Ruta del historial de compra de un usuario\n@app.route(\"/historial\", methods = ['GET', 'POST'])\ndef historial():\n\tif('user' not in session):\n\t\treturn redirect(url_for('index'))\n\telse:\n\t\tPATH = 'usuarios/' + session['user'] + '/historial.json'\n\t\thistorial = json.loads(open(PATH).read())\n\n\t\tif(request.method == 'POST'):\n\t\t\treturn redirect(url_for('index'), code = 307)\n\t\telse:\n\t\t\treturn render_template('historial.html', historial = historial)\n\n#Mantener cookies despues de cerrar el 
navegador\n@app.before_request\ndef session_management():\n\tsession.permanent = True\n\nif __name__ == \"__main__\":\n\tapp.secret_key = os.urandom(24)\n\tapp.run(host='0.0.0.0', port=5001, debug=True)\n \n",
"step-ids": [
9,
12,
13,
15,
16
]
}
|
[
9,
12,
13,
15,
16
] |
#processes are described by generator functions
#during the lifetime of a process, the process function (a generator function)
#creates events and yields them
#when a process yields an event, it gets suspended
#SimPy resumes the process when the event is triggered
#multiple processes waiting on the same event are resumed in the same order
#in which they yielded the event
import simpy
def car(env):
# i = 0
# while i<=10:
while True:
print("The car will start parking at: ",env.now)
parking_timeout = 5
yield env.timeout(parking_timeout)
print("The car will start driving at: ",env.now)
driving_timeout = 2
yield env.timeout(driving_timeout)
# if i == 10:
# print("the car is done moving")
# yield env.timeout(1)
# i += 1
env = simpy.Environment()
env.process(car(env)) #the generator function creates the process called car
#env.run()
env.run(until=20)
|
normal
|
{
"blob_id": "892eb8d1802b01c035993232cc80c710211ab102",
"index": 802,
"step-1": "<mask token>\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\n<mask token>\nenv.process(car(env))\nenv.run(until=20)\n",
"step-3": "<mask token>\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\nenv = simpy.Environment()\nenv.process(car(env))\nenv.run(until=20)\n",
"step-4": "import simpy\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\nenv = simpy.Environment()\nenv.process(car(env))\nenv.run(until=20)\n",
"step-5": "#processes are described by generator functions\n#during the lifetime of a process, the process function(generator function) \n#creates events and yields them\n\n#when a process yields an event, it gets suspended\n#Simpy resumes the process when the event is triggered\n#multiple processes waiting on the same event is resumed in the same order\n#it yielded the event\n\nimport simpy\n\ndef car(env):\n # i = 0\n # while i<=10:\n while True:\n print(\"The car will start parking at: \",env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n\n print(\"The car will start driving at: \",env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n # if i == 10:\n # print(\"the car is done moving\")\n # yield env.timeout(1)\n # i += 1\n\n\nenv = simpy.Environment()\nenv.process(car(env)) #the generator function creates the process called car\n#env.run()\nenv.run(until=20)\n\n\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def domain_name(url):
while "https://" in url or "http://" in url or "www." in url:
url = url.replace("https://", ' ') if "https://" in url else url.replace("http://", ' ') if "http://" in url else url.replace("www.", ' ')
url = list(url)
for i in range(len(url)):
if url[i] == ".":
return "".join(url[0:i]).strip()
print(domain_name("https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python"))
|
normal
|
{
"blob_id": "2b9dfd0cfd62276330f1a4f983f318076f329437",
"index": 5026,
"step-1": "<mask token>\n",
"step-2": "def domain_name(url):\n while 'https://' in url or 'http://' in url or 'www.' in url:\n url = url.replace('https://', ' '\n ) if 'https://' in url else url.replace('http://', ' '\n ) if 'http://' in url else url.replace('www.', ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == '.':\n return ''.join(url[0:i]).strip()\n\n\n<mask token>\n",
"step-3": "def domain_name(url):\n while 'https://' in url or 'http://' in url or 'www.' in url:\n url = url.replace('https://', ' '\n ) if 'https://' in url else url.replace('http://', ' '\n ) if 'http://' in url else url.replace('www.', ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == '.':\n return ''.join(url[0:i]).strip()\n\n\nprint(domain_name(\n 'https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python'))\n",
"step-4": "def domain_name(url):\n while \"https://\" in url or \"http://\" in url or \"www.\" in url:\n url = url.replace(\"https://\", ' ') if \"https://\" in url else url.replace(\"http://\", ' ') if \"http://\" in url else url.replace(\"www.\", ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == \".\":\n return \"\".join(url[0:i]).strip()\nprint(domain_name(\"https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python\"))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Import this.
|
normal
|
{
"blob_id": "1f69bcd204c9be26756d964f4deb61296e40ff10",
"index": 9658,
"step-1": "# Import this.\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
"""Secret Garden tests."""
from secret_garden import Decoder, SecretGarden
import random
filename = "pr08_example_data.txt"
key = "Fat Chocobo"
d = Decoder(filename, key)
s = SecretGarden(filename, key)
def test_read_from_file():
"""
Test of function of reading data from file.
:return:
"""
reading_file = d.read_code_from_file()
assert type(reading_file) == list
assert len(reading_file) == 7
assert "\n" not in d.read_code_from_file()
def test_decode_from_base64():
"""
Test of function of decoding messages from base64 to utf-8.
:return:
"""
list_to_be_checked = []
list_of_truth = [")-.7)-AOO", "-57)-0JASJAOOASJ", ")07)2AJSAJAJOAJJAAO", ".7)/AJSSAJSJOOSSOOOS",
"-,70", ",7)-,OAASSOSOAAASAAAAA", ".7).SOSAOJAOOO"]
for x in d.read_code_from_file():
list_to_be_checked.append(d.decode_from_base64(x))
assert list_to_be_checked == list_of_truth
def test_calculate_cipher_step():
"""
Test of function of calculating the cipher step.
:return:
"""
given_value = d.calculate_cipher_step()
assert type(given_value) == int
assert given_value == 1016
new_decoder = Decoder(filename, "HELLO THERE!")
new_value = new_decoder.calculate_cipher_step()
assert new_value != given_value
random_number = random.Random()
assert given_value != random_number
def test_decode():
"""
Test of function of decoding.
:return:
"""
decoding = d.decode()
assert type(decoding) == list
assert len(decoding) == 7
assert decoding[0] == '-12;-1\n\nESS'
assert decoding[-1] == '2;-2\n\nWSWESNESSS'
for x in decoding:
assert "\n" in x
def test_decode_messages():
"""
Test of function of decoding messages in SecretGarden class.
:return:
"""
decoding1 = d.decode()
decoding2 = s.decode_messages()
assert decoding1 == decoding2
decoding3 = SecretGarden(filename, "HELLO, STUDENTS.").decode_messages()
assert decoding1 != decoding3
def test_find_secret_locations():
"""
Test of function of finding secret locations in SecretGarden class.
:return:
"""
list_of_random = [(random.Random(), random.Random()), (random.Random(), random.Random()), (random.Random(),
random.Random()),
(random.Random(), random.Random()), (random.Random(), random.Random()),
(random.Random(), random.Random()), (random.Random(), random.Random())]
list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]
secrets = s.find_secret_locations()
assert type(secrets) == list
for x in secrets:
assert type(x) == tuple
assert secrets == list_of_truth
assert list_of_random != secrets
assert len(list_of_random) == len(secrets)
|
normal
|
{
"blob_id": "8cfab525ab3a86dd6964475d5621fdc7c6413e38",
"index": 8019,
"step-1": "<mask token>\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\n<mask token>\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-2": "<mask token>\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-3": "<mask token>\nfilename = 'pr08_example_data.txt'\nkey = 'Fat Chocobo'\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-4": "<mask token>\nfrom secret_garden import Decoder, SecretGarden\nimport random\nfilename = 'pr08_example_data.txt'\nkey = 'Fat Chocobo'\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-5": "\"\"\"Secret Garden tests.\"\"\"\nfrom secret_garden import Decoder, SecretGarden\nimport random\n\nfilename = \"pr08_example_data.txt\"\nkey = \"Fat Chocobo\"\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [\")-.7)-\u0006\u0006AOO\", \"-57)-0\u0006\u0006JASJAOOASJ\", \")07)2\u0006\u0006AJSAJAJOAJJAAO\", \".7)/\u0006\u0006AJSSAJSJOOSSOOOS\",\n \"-,70\u0006\u0006\", \",7)-,\u0006\u0006OAASSOSOAAASAAAAA\", \".7).\u0006\u0006SOSAOJAOOO\"]\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, \"HELLO THERE!\")\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert \"\\n\" in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(), random.Random()), (random.Random(),\n random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#Print the tensors of a ckpt or pb model
# ckpt model
#Method 1:
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
checkpoint_path="/your/path"
print_tensors_in_checkpoint_file(checkpoint_path,tensor_name='', all_tensors=True, all_tensor_names=True)
#Method 2:
from tensorflow.python import pywrap_tensorflow
checkpoint_path = "/your/path"
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
n=0
for key in var_to_shape_map:
print("tensor_name: ", key)
#print("****",reader.get_tensor(key))
n+=1
print("n:",n)
#pb model
#Print the tensors
import tensorflow as tf
import os
out_pb_path="/your/path"
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
tensor_name_list = [tensor.name for tensor in tf.get_default_graph().as_graph_def().node]
m=0
for tensor_name in tensor_name_list:
print("pd:",tensor_name,'\n')
m+=1
print("m:",m)
#Get the graph of a pb model
import tensorflow as tf
from tensorflow.python.platform import gfile
model = "/your/path"
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
summaryWriter = tf.summary.FileWriter('log/', graph)
#Command: tensorboard --logdir=/opt/data/hyh/tboard/tusimple_lanenet/vgg
|
normal
|
{
"blob_id": "50fab726b90f65a82c1206a8c7df955a8b76da99",
"index": 1572,
"step-1": "<mask token>\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',\n all_tensors=True, all_tensor_names=True)\n<mask token>\nfor key in var_to_shape_map:\n print('tensor_name: ', key)\n n += 1\nprint('n:', n)\n<mask token>\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\ncreate_graph()\n<mask token>\nfor tensor_name in tensor_name_list:\n print('pd:', tensor_name, '\\n')\n m += 1\nprint('m:', m)\n<mask token>\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\ntf.import_graph_def(graph_def, name='graph')\n<mask token>\n",
"step-3": "<mask token>\ncheckpoint_path = '/your/path'\nprint_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',\n all_tensors=True, all_tensor_names=True)\n<mask token>\ncheckpoint_path = '/your/path'\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nn = 0\nfor key in var_to_shape_map:\n print('tensor_name: ', key)\n n += 1\nprint('n:', n)\n<mask token>\nout_pb_path = '/your/path'\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\ncreate_graph()\ntensor_name_list = [tensor.name for tensor in tf.get_default_graph().\n as_graph_def().node]\nm = 0\nfor tensor_name in tensor_name_list:\n print('pd:', tensor_name, '\\n')\n m += 1\nprint('m:', m)\n<mask token>\nmodel = '/your/path'\ngraph = tf.get_default_graph()\ngraph_def = graph.as_graph_def()\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\ntf.import_graph_def(graph_def, name='graph')\nsummaryWriter = tf.summary.FileWriter('log/', graph)\n",
"step-4": "from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\ncheckpoint_path = '/your/path'\nprint_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',\n all_tensors=True, all_tensor_names=True)\nfrom tensorflow.python import pywrap_tensorflow\ncheckpoint_path = '/your/path'\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nn = 0\nfor key in var_to_shape_map:\n print('tensor_name: ', key)\n n += 1\nprint('n:', n)\nimport tensorflow as tf\nimport os\nout_pb_path = '/your/path'\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\ncreate_graph()\ntensor_name_list = [tensor.name for tensor in tf.get_default_graph().\n as_graph_def().node]\nm = 0\nfor tensor_name in tensor_name_list:\n print('pd:', tensor_name, '\\n')\n m += 1\nprint('m:', m)\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\nmodel = '/your/path'\ngraph = tf.get_default_graph()\ngraph_def = graph.as_graph_def()\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\ntf.import_graph_def(graph_def, name='graph')\nsummaryWriter = tf.summary.FileWriter('log/', graph)\n",
"step-5": "#打印ckpt或pb模型的tensor\r\n\r\n# ckpt模型 \r\n#第一种方法: \r\nfrom tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file \r\ncheckpoint_path=\"/your/path\"\r\nprint_tensors_in_checkpoint_file(checkpoint_path,tensor_name='', all_tensors=True, all_tensor_names=True)\r\n\r\n#第二种方法:\r\nfrom tensorflow.python import pywrap_tensorflow\r\ncheckpoint_path = \"/your/path\"\r\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\r\nvar_to_shape_map = reader.get_variable_to_shape_map()\r\nn=0\r\nfor key in var_to_shape_map:\r\n print(\"tensor_name: \", key)\r\n #print(\"****\",reader.get_tensor(key))\r\n n+=1\r\nprint(\"n:\",n)\r\n\r\n#pb模型\r\n#打印tensor\r\nimport tensorflow as tf\r\nimport os\r\nout_pb_path=\"/your/path\"\r\ndef create_graph():\r\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\r\n graph_def = tf.GraphDef()\r\n graph_def.ParseFromString(f.read())\r\n tf.import_graph_def(graph_def, name='')\r\n \r\ncreate_graph()\r\ntensor_name_list = [tensor.name for tensor in tf.get_default_graph().as_graph_def().node]\r\nm=0\r\nfor tensor_name in tensor_name_list:\r\n print(\"pd:\",tensor_name,'\\n')\r\n m+=1\r\nprint(\"m:\",m)\r\n\r\n#获得pb模型的图\r\nimport tensorflow as tf\r\nfrom tensorflow.python.platform import gfile\r\n\r\nmodel = \"/your/path\"\r\ngraph = tf.get_default_graph()\r\ngraph_def = graph.as_graph_def()\r\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\r\ntf.import_graph_def(graph_def, name='graph')\r\nsummaryWriter = tf.summary.FileWriter('log/', graph)\r\n\r\n#命令tensorboard --logdir=/opt/data/hyh/tboard/tusimple_lanenet/vgg\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def medianflt(img, i, j, msize, mr, mc):
pxls = []
for a in range(msize):
for b in range(msize):
mi = i + a - mr
mj = j + b - mc
pxls.append(img[mi][mj])
pxls.sort()
return pxls[msize * msize // 2]
def orderstatistic(img, row, col, msize=3):
rimg = copy.deepcopy(img)
mr = (msize - 1) // 2
mc = (msize - 1) // 2
for i in range(mr, row - mr - 1):
for j in range(mc, col - mc - 1):
rimg[i][j] = medianflt(img, i, j, msize, mr, mc)
return rimg
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.imshow('img', img)
<|reserved_special_token_0|>
def medianflt(img, i, j, msize, mr, mc):
pxls = []
for a in range(msize):
for b in range(msize):
mi = i + a - mr
mj = j + b - mc
pxls.append(img[mi][mj])
pxls.sort()
return pxls[msize * msize // 2]
def orderstatistic(img, row, col, msize=3):
rimg = copy.deepcopy(img)
mr = (msize - 1) // 2
mc = (msize - 1) // 2
for i in range(mr, row - mr - 1):
for j in range(mc, col - mc - 1):
rimg[i][j] = medianflt(img, i, j, msize, mr, mc)
return rimg
<|reserved_special_token_0|>
cv2.imshow('aimg', rimg)
cv2.waitKey(0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
imgpath = 'D:\\DIP-Project1/b.jpg'
img = cv2.imread(imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('img', img)
row = len(img)
col = len(img[0])
def medianflt(img, i, j, msize, mr, mc):
pxls = []
for a in range(msize):
for b in range(msize):
mi = i + a - mr
mj = j + b - mc
pxls.append(img[mi][mj])
pxls.sort()
return pxls[msize * msize // 2]
def orderstatistic(img, row, col, msize=3):
rimg = copy.deepcopy(img)
mr = (msize - 1) // 2
mc = (msize - 1) // 2
for i in range(mr, row - mr - 1):
for j in range(mc, col - mc - 1):
rimg[i][j] = medianflt(img, i, j, msize, mr, mc)
return rimg
d0 = 9
rimg = orderstatistic(img, row, col, d0)
cv2.imshow('aimg', rimg)
cv2.waitKey(0)
<|reserved_special_token_1|>
import cv2
import numpy as np
import copy
imgpath = 'D:\\DIP-Project1/b.jpg'
img = cv2.imread(imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('img', img)
row = len(img)
col = len(img[0])
def medianflt(img, i, j, msize, mr, mc):
pxls = []
for a in range(msize):
for b in range(msize):
mi = i + a - mr
mj = j + b - mc
pxls.append(img[mi][mj])
pxls.sort()
return pxls[msize * msize // 2]
def orderstatistic(img, row, col, msize=3):
rimg = copy.deepcopy(img)
mr = (msize - 1) // 2
mc = (msize - 1) // 2
for i in range(mr, row - mr - 1):
for j in range(mc, col - mc - 1):
rimg[i][j] = medianflt(img, i, j, msize, mr, mc)
return rimg
d0 = 9
rimg = orderstatistic(img, row, col, d0)
cv2.imshow('aimg', rimg)
cv2.waitKey(0)
|
flexible
|
{
"blob_id": "cfcce8c760f6ba49ce450d78782cb8f3b5fc1188",
"index": 2857,
"step-1": "<mask token>\n\n\ndef medianflt(img, i, j, msize, mr, mc):\n pxls = []\n for a in range(msize):\n for b in range(msize):\n mi = i + a - mr\n mj = j + b - mc\n pxls.append(img[mi][mj])\n pxls.sort()\n return pxls[msize * msize // 2]\n\n\ndef orderstatistic(img, row, col, msize=3):\n rimg = copy.deepcopy(img)\n mr = (msize - 1) // 2\n mc = (msize - 1) // 2\n for i in range(mr, row - mr - 1):\n for j in range(mc, col - mc - 1):\n rimg[i][j] = medianflt(img, i, j, msize, mr, mc)\n return rimg\n\n\n<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('img', img)\n<mask token>\n\n\ndef medianflt(img, i, j, msize, mr, mc):\n pxls = []\n for a in range(msize):\n for b in range(msize):\n mi = i + a - mr\n mj = j + b - mc\n pxls.append(img[mi][mj])\n pxls.sort()\n return pxls[msize * msize // 2]\n\n\ndef orderstatistic(img, row, col, msize=3):\n rimg = copy.deepcopy(img)\n mr = (msize - 1) // 2\n mc = (msize - 1) // 2\n for i in range(mr, row - mr - 1):\n for j in range(mc, col - mc - 1):\n rimg[i][j] = medianflt(img, i, j, msize, mr, mc)\n return rimg\n\n\n<mask token>\ncv2.imshow('aimg', rimg)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nimgpath = 'D:\\\\DIP-Project1/b.jpg'\nimg = cv2.imread(imgpath)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ncv2.imshow('img', img)\nrow = len(img)\ncol = len(img[0])\n\n\ndef medianflt(img, i, j, msize, mr, mc):\n pxls = []\n for a in range(msize):\n for b in range(msize):\n mi = i + a - mr\n mj = j + b - mc\n pxls.append(img[mi][mj])\n pxls.sort()\n return pxls[msize * msize // 2]\n\n\ndef orderstatistic(img, row, col, msize=3):\n rimg = copy.deepcopy(img)\n mr = (msize - 1) // 2\n mc = (msize - 1) // 2\n for i in range(mr, row - mr - 1):\n for j in range(mc, col - mc - 1):\n rimg[i][j] = medianflt(img, i, j, msize, mr, mc)\n return rimg\n\n\nd0 = 9\nrimg = orderstatistic(img, row, col, d0)\ncv2.imshow('aimg', rimg)\ncv2.waitKey(0)\n",
"step-4": "import cv2\nimport numpy as np\nimport copy\nimgpath = 'D:\\\\DIP-Project1/b.jpg'\nimg = cv2.imread(imgpath)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ncv2.imshow('img', img)\nrow = len(img)\ncol = len(img[0])\n\n\ndef medianflt(img, i, j, msize, mr, mc):\n pxls = []\n for a in range(msize):\n for b in range(msize):\n mi = i + a - mr\n mj = j + b - mc\n pxls.append(img[mi][mj])\n pxls.sort()\n return pxls[msize * msize // 2]\n\n\ndef orderstatistic(img, row, col, msize=3):\n rimg = copy.deepcopy(img)\n mr = (msize - 1) // 2\n mc = (msize - 1) // 2\n for i in range(mr, row - mr - 1):\n for j in range(mc, col - mc - 1):\n rimg[i][j] = medianflt(img, i, j, msize, mr, mc)\n return rimg\n\n\nd0 = 9\nrimg = orderstatistic(img, row, col, d0)\ncv2.imshow('aimg', rimg)\ncv2.waitKey(0)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class DocUploadForm(forms.ModelForm):
tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
class Meta:
model = Document
exclude = ['organization', 'private_user', 'is_public',
'is_user_private', 'display']
class ShopForm(forms.Form):
shopName = forms.CharField(max_length=100)
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),
required=False)
address = forms.CharField(widget=forms.Textarea())
pincode = forms.IntegerField()
nearest_college = forms.CharField(max_length=200, required=False)
nearest_town = forms.CharField(max_length=200, required=False)
telephone = forms.CharField(max_length=14)
longitude = forms.DecimalField(max_digits=11, decimal_places=7)
latitude = forms.DecimalField(max_digits=11, decimal_places=7)
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
class ShopEditForm(forms.ModelForm):
class Meta:
model = Shop
exclude = ['latitude', 'longitude', 'is_active']
<|reserved_special_token_0|>
@login_required
def docUpload(request):
user = UserProfile.objects.get(user=request.user)
if request.method == 'POST':
if user.userType == 1:
org = Organization.objects.get(owner=request.user)
elif user.userType == 2:
org = Organization.objects.get(employee=request.user)
data = DocUploadForm(request.POST, request.FILES)
new_doc = data.save(commit=False)
new_doc.organization = org
new_doc.is_public = True
new_doc.save()
data.save_m2m()
if user.userType == 1:
return HttpResponseRedirect(reverse('documentListOwner'))
elif user.userType == 2:
return HttpResponseRedirect(reverse('documentListEmp'))
else:
form = DocUploadForm()
if user.userType == 1:
context = {'docUploadForm': form}
return render(request, 'printo_app/docUpload-owner.html', context)
if user.userType == 2:
shopRate = Shop.objects.get(employee=request.user).rate
context = {'docUploadForm': form, 'rate': shopRate}
return render(request, 'printo_app/docUpload-emp.html', context)
@login_required
def docList(request):
user = UserProfile.objects.get(user=request.user)
    if user.userType == 1:
        org = Organization.objects.get(owner=request.user)
        docList = Document.objects.filter(is_public=True).filter(organization=org)
        context = {'docs': docList}
        return render(request, 'printo_app/docList-owner.html', context)
    elif user.userType == 2:
        org = Organization.objects.get(employee=request.user)
        docList = Document.objects.filter(is_public=True).filter(organization=org).order_by('-uploadedDate')
        context = {'docs': docList}
        return render(request, 'printo_app/docList-emp.html', context)
<|reserved_special_token_0|>
@login_required
def docDetail(request, docid):
docDetail = Document.objects.get(id=docid)
form = DocUploadForm(instance=docDetail)
context = {'docEditForm': form, 'doc': docDetail}
return render(request, 'printo_app/docDetail.html', context)
@login_required
def docEditSave(request, docid):
currentDoc = Document.objects.get(id=docid)
docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)
docDetail.save()
context = {'msg': docDetail}
return HttpResponseRedirect(reverse('documentList'))
<|reserved_special_token_0|>
@login_required
def shopEditSave(request):
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm(request.POST, instance=shop)
shopForm.save()
return HttpResponseRedirect(reverse('shopProfile'))
<|reserved_special_token_0|>
@login_required
def orderList(request, shopid=None):
shop = Shop.objects.get(employee=request.user)
orderList = Order.objects.filter(shop=shop)
new_count = orderList.filter(is_new=True).count()
pending_count = orderList.filter(is_accepted=True).count()
completed_count = orderList.filter(is_printed=True).count()
delivered_count = orderList.filter(is_delivered=True).count()
context = {'orders': orderList, 'new_count': new_count, 'pending_count':
pending_count, 'completed_count': completed_count,
'delivered_count': delivered_count}
return render(request, 'printo_app/ordersList.html', context)
<|reserved_special_token_0|>
@login_required
def shopCreate(request):
uprofile = get_object_or_404(UserProfile, user=request.user)
if uprofile.userType == 1:
pass
else:
return HttpResponse("You don't have permission")
if request.method == 'POST':
form = ShopForm(request.POST)
import ipdb
ipdb.set_trace()
if form.is_valid():
username = form.cleaned_data.get('username', None)
password = form.cleaned_data.get('password', None)
telephone = form.cleaned_data.get('telephone', None)
email = request.user.email
if username != None:
user = User.objects.create_user(username=username, email=
email, password=password)
userprofile = UserProfile()
userprofile.user = user
userprofile.userType = 2
if telephone != None:
userprofile.telephone = telephone
userprofile.save()
shopprofile = Shop()
shopprofile.employee = user
shopprofile.owner = Organization.objects.get(owner=request.user)
shopprofile.email = email
shopprofile.shopName = form.cleaned_data.get('shopName', None)
shopprofile.pincode = form.cleaned_data.get('pincode', None)
shopprofile.address = form.cleaned_data.get('address', None)
shopprofile.latitude = form.cleaned_data.get('latitude', None)
shopprofile.longitude = form.cleaned_data.get('longitude', None)
shopprofile.telephone = form.cleaned_data.get('telephone', None)
shopprofile.save()
shopprofile.services = form.cleaned_data.get('services', None)
return HttpResponseRedirect(reverse('shopList'))
else:
userform = 'this form is to be deleted'
shopform = ShopForm()
context = {'shopCreateForm': shopform, 'userForm': userform}
return render(request, 'printo_app/shopCreate.html', context)
<|reserved_special_token_0|>
class RegistrationForm(forms.Form):
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
mobile = forms.CharField(max_length=14)
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
def clean_email(self):
if 'email' in self.cleaned_data:
try:
user = User.objects.get(username=self.cleaned_data['email'])
raise forms.ValidationError(_(
                u'This email address is already registered'))
except User.DoesNotExist:
pass
return self.cleaned_data['email']
def index_main(request):
if request.user.is_authenticated() == True:
return HttpResponseRedirect(reverse('main'))
else:
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
u = User.objects.create_user(form.cleaned_data['email'],
form.cleaned_data['email'], form.cleaned_data['password'])
profile = UserProfile()
profile.user = u
profile.userType = 1
profile.mobile = form.cleaned_data['mobile']
profile.save()
org = Organization()
org.owner = u
org.save()
return HttpResponse('Thanks')
else:
form = RegistrationForm()
return render(request, 'index_main.html', context={'form': form})
def docListOwner(request):
pass
<|reserved_special_token_0|>
def get_universitys(request):
p = {}
for c in University.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_publishers(request):
p = {}
for c in Publisher.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_0|>
def get_topics(request):
p = {}
for c in Topic.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_0|>
def get_cities(request):
p = {}
for c in City.objects.all():
p[c.name] = str(c.latitude), str(c.longitude)
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DocUploadForm(forms.ModelForm):
tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
class Meta:
model = Document
exclude = ['organization', 'private_user', 'is_public',
'is_user_private', 'display']
class ShopForm(forms.Form):
shopName = forms.CharField(max_length=100)
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),
required=False)
address = forms.CharField(widget=forms.Textarea())
pincode = forms.IntegerField()
nearest_college = forms.CharField(max_length=200, required=False)
nearest_town = forms.CharField(max_length=200, required=False)
telephone = forms.CharField(max_length=14)
longitude = forms.DecimalField(max_digits=11, decimal_places=7)
latitude = forms.DecimalField(max_digits=11, decimal_places=7)
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
class ShopEditForm(forms.ModelForm):
class Meta:
model = Shop
exclude = ['latitude', 'longitude', 'is_active']
<|reserved_special_token_0|>
@login_required
def docUpload(request):
user = UserProfile.objects.get(user=request.user)
if request.method == 'POST':
if user.userType == 1:
org = Organization.objects.get(owner=request.user)
elif user.userType == 2:
org = Organization.objects.get(employee=request.user)
data = DocUploadForm(request.POST, request.FILES)
new_doc = data.save(commit=False)
new_doc.organization = org
new_doc.is_public = True
new_doc.save()
data.save_m2m()
if user.userType == 1:
return HttpResponseRedirect(reverse('documentListOwner'))
elif user.userType == 2:
return HttpResponseRedirect(reverse('documentListEmp'))
else:
form = DocUploadForm()
if user.userType == 1:
context = {'docUploadForm': form}
return render(request, 'printo_app/docUpload-owner.html', context)
if user.userType == 2:
shopRate = Shop.objects.get(employee=request.user).rate
context = {'docUploadForm': form, 'rate': shopRate}
return render(request, 'printo_app/docUpload-emp.html', context)
@login_required
def docList(request):
user = UserProfile.objects.get(user=request.user)
    if user.userType == 1:
        org = Organization.objects.get(owner=request.user)
        docList = Document.objects.filter(is_public=True).filter(organization=org)
        context = {'docs': docList}
        return render(request, 'printo_app/docList-owner.html', context)
    elif user.userType == 2:
        org = Organization.objects.get(employee=request.user)
        docList = Document.objects.filter(is_public=True).filter(organization=org).order_by('-uploadedDate')
        context = {'docs': docList}
        return render(request, 'printo_app/docList-emp.html', context)
<|reserved_special_token_0|>
@login_required
def docDetail(request, docid):
docDetail = Document.objects.get(id=docid)
form = DocUploadForm(instance=docDetail)
context = {'docEditForm': form, 'doc': docDetail}
return render(request, 'printo_app/docDetail.html', context)
@login_required
def docEditSave(request, docid):
currentDoc = Document.objects.get(id=docid)
docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)
docDetail.save()
context = {'msg': docDetail}
return HttpResponseRedirect(reverse('documentList'))
<|reserved_special_token_0|>
@login_required
def shopEditSave(request):
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm(request.POST, instance=shop)
shopForm.save()
return HttpResponseRedirect(reverse('shopProfile'))
<|reserved_special_token_0|>
@login_required
def orderList(request, shopid=None):
shop = Shop.objects.get(employee=request.user)
orderList = Order.objects.filter(shop=shop)
new_count = orderList.filter(is_new=True).count()
pending_count = orderList.filter(is_accepted=True).count()
completed_count = orderList.filter(is_printed=True).count()
delivered_count = orderList.filter(is_delivered=True).count()
context = {'orders': orderList, 'new_count': new_count, 'pending_count':
pending_count, 'completed_count': completed_count,
'delivered_count': delivered_count}
return render(request, 'printo_app/ordersList.html', context)
@login_required
def shopList(request):
org = Organization.objects.get(owner=request.user)
shops = Shop.objects.filter(owner=org)
context = {'shops': shops}
return render(request, 'printo_app/shopList.html', context)
@login_required
def shopCreate(request):
uprofile = get_object_or_404(UserProfile, user=request.user)
if uprofile.userType == 1:
pass
else:
return HttpResponse("You don't have permission")
if request.method == 'POST':
form = ShopForm(request.POST)
import ipdb
ipdb.set_trace()
if form.is_valid():
username = form.cleaned_data.get('username', None)
password = form.cleaned_data.get('password', None)
telephone = form.cleaned_data.get('telephone', None)
email = request.user.email
if username != None:
user = User.objects.create_user(username=username, email=
email, password=password)
userprofile = UserProfile()
userprofile.user = user
userprofile.userType = 2
if telephone != None:
userprofile.telephone = telephone
userprofile.save()
shopprofile = Shop()
shopprofile.employee = user
shopprofile.owner = Organization.objects.get(owner=request.user)
shopprofile.email = email
shopprofile.shopName = form.cleaned_data.get('shopName', None)
shopprofile.pincode = form.cleaned_data.get('pincode', None)
shopprofile.address = form.cleaned_data.get('address', None)
shopprofile.latitude = form.cleaned_data.get('latitude', None)
shopprofile.longitude = form.cleaned_data.get('longitude', None)
shopprofile.telephone = form.cleaned_data.get('telephone', None)
shopprofile.save()
shopprofile.services = form.cleaned_data.get('services', None)
return HttpResponseRedirect(reverse('shopList'))
else:
userform = 'this form is to be deleted'
shopform = ShopForm()
context = {'shopCreateForm': shopform, 'userForm': userform}
return render(request, 'printo_app/shopCreate.html', context)
<|reserved_special_token_0|>
class RegistrationForm(forms.Form):
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
mobile = forms.CharField(max_length=14)
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
def clean_email(self):
if 'email' in self.cleaned_data:
try:
user = User.objects.get(username=self.cleaned_data['email'])
raise forms.ValidationError(_(
                u'This email address is already registered'))
except User.DoesNotExist:
pass
return self.cleaned_data['email']
def index_main(request):
if request.user.is_authenticated() == True:
return HttpResponseRedirect(reverse('main'))
else:
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
u = User.objects.create_user(form.cleaned_data['email'],
form.cleaned_data['email'], form.cleaned_data['password'])
profile = UserProfile()
profile.user = u
profile.userType = 1
profile.mobile = form.cleaned_data['mobile']
profile.save()
org = Organization()
org.owner = u
org.save()
return HttpResponse('Thanks')
else:
form = RegistrationForm()
return render(request, 'index_main.html', context={'form': form})
def docListOwner(request):
pass
<|reserved_special_token_0|>
def get_universitys(request):
p = {}
for c in University.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_publishers(request):
p = {}
for c in Publisher.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_0|>
def get_topics(request):
p = {}
for c in Topic.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_0|>
def get_cities(request):
p = {}
for c in City.objects.all():
p[c.name] = str(c.latitude), str(c.longitude)
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DocUploadForm(forms.ModelForm):
tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
class Meta:
model = Document
exclude = ['organization', 'private_user', 'is_public',
'is_user_private', 'display']
class ShopForm(forms.Form):
shopName = forms.CharField(max_length=100)
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),
required=False)
address = forms.CharField(widget=forms.Textarea())
pincode = forms.IntegerField()
nearest_college = forms.CharField(max_length=200, required=False)
nearest_town = forms.CharField(max_length=200, required=False)
telephone = forms.CharField(max_length=14)
longitude = forms.DecimalField(max_digits=11, decimal_places=7)
latitude = forms.DecimalField(max_digits=11, decimal_places=7)
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
class ShopEditForm(forms.ModelForm):
class Meta:
model = Shop
exclude = ['latitude', 'longitude', 'is_active']
@login_required
def docUpload(request):
user = UserProfile.objects.get(user=request.user)
if request.method == 'POST':
if user.userType == 1:
org = Organization.objects.get(owner=request.user)
elif user.userType == 2:
org = Organization.objects.get(employee=request.user)
data = DocUploadForm(request.POST, request.FILES)
new_doc = data.save(commit=False)
new_doc.organization = org
new_doc.is_public = True
new_doc.save()
data.save_m2m()
if user.userType == 1:
return HttpResponseRedirect(reverse('documentListOwner'))
elif user.userType == 2:
return HttpResponseRedirect(reverse('documentListEmp'))
else:
form = DocUploadForm()
if user.userType == 1:
context = {'docUploadForm': form}
return render(request, 'printo_app/docUpload-owner.html', context)
if user.userType == 2:
shopRate = Shop.objects.get(employee=request.user).rate
context = {'docUploadForm': form, 'rate': shopRate}
return render(request, 'printo_app/docUpload-emp.html', context)
@login_required
def docList(request):
user = UserProfile.objects.get(user=request.user)
if user.userType == 1:
org = Organization.objects.get(owner=request.user)
docList = Document.objects.filter(is_public=True).filter(organization
=org)
context = {'docs': docList}
return render(request, 'printo_app/docList-owner.html', context)
elif user.userType == 2:
org = Organization.objects.get(employee=request.user)
docList = Document.objects.filter(is_public=True).filter(organization=org
).order_by('-uploadedDate')
context = {'docs': docList}
return render(request, 'printo_app/docList-emp.html', context)
<|reserved_special_token_0|>
@login_required
def docDetail(request, docid):
docDetail = Document.objects.get(id=docid)
form = DocUploadForm(instance=docDetail)
context = {'docEditForm': form, 'doc': docDetail}
return render(request, 'printo_app/docDetail.html', context)
@login_required
def docEditSave(request, docid):
currentDoc = Document.objects.get(id=docid)
docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)
docDetail.save()
context = {'msg': docDetail}
return HttpResponseRedirect(reverse('documentList'))
@login_required
def shopProfile(request, shopid=None):
context = {}
user = UserProfile.objects.get(user=request.user)
if user.userType == 1:
pass
elif user.userType == 2:
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm()
context = {'shopForm': shopForm, 'details': shop}
return render(request, 'printo_app/shopProfile.html', context)
@login_required
def shopEditSave(request):
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm(request.POST, instance=shop)
shopForm.save()
return HttpResponseRedirect(reverse('shopProfile'))
@login_required
def indexEmp(request, shopid=None):
user = UserProfile.objects.get(user=request.user)
is_owner = False
if user.userType == 1:
is_owner = True
elif user.userType == 2:
is_owner = False
context = {'is_owner': is_owner}
return HttpResponseRedirect(reverse('orderList'))
@login_required
def orderList(request, shopid=None):
shop = Shop.objects.get(employee=request.user)
orderList = Order.objects.filter(shop=shop)
new_count = orderList.filter(is_new=True).count()
pending_count = orderList.filter(is_accepted=True).count()
completed_count = orderList.filter(is_printed=True).count()
delivered_count = orderList.filter(is_delivered=True).count()
context = {'orders': orderList, 'new_count': new_count, 'pending_count':
pending_count, 'completed_count': completed_count,
'delivered_count': delivered_count}
return render(request, 'printo_app/ordersList.html', context)
@login_required
def shopList(request):
org = Organization.objects.get(owner=request.user)
shops = Shop.objects.filter(owner=org)
context = {'shops': shops}
return render(request, 'printo_app/shopList.html', context)
@login_required
def shopCreate(request):
uprofile = get_object_or_404(UserProfile, user=request.user)
if uprofile.userType == 1:
pass
else:
return HttpResponse("You don't have permission")
if request.method == 'POST':
form = ShopForm(request.POST)
if form.is_valid():
username = form.cleaned_data.get('username', None)
password = form.cleaned_data.get('password', None)
telephone = form.cleaned_data.get('telephone', None)
email = request.user.email
            if username is not None:
user = User.objects.create_user(username=username, email=
email, password=password)
userprofile = UserProfile()
userprofile.user = user
userprofile.userType = 2
                if telephone is not None:
userprofile.telephone = telephone
userprofile.save()
shopprofile = Shop()
shopprofile.employee = user
shopprofile.owner = Organization.objects.get(owner=request.user)
shopprofile.email = email
shopprofile.shopName = form.cleaned_data.get('shopName', None)
shopprofile.pincode = form.cleaned_data.get('pincode', None)
shopprofile.address = form.cleaned_data.get('address', None)
shopprofile.latitude = form.cleaned_data.get('latitude', None)
shopprofile.longitude = form.cleaned_data.get('longitude', None)
shopprofile.telephone = form.cleaned_data.get('telephone', None)
shopprofile.save()
shopprofile.services = form.cleaned_data.get('services', None)
return HttpResponseRedirect(reverse('shopList'))
else:
userform = 'this form is to be deleted'
shopform = ShopForm()
context = {'shopCreateForm': shopform, 'userForm': userform}
return render(request, 'printo_app/shopCreate.html', context)
@login_required
def index(request):
user = UserProfile.objects.get(user=request.user)
if user.userType == 1:
return HttpResponseRedirect(reverse('OwnerMain'))
elif user.userType == 2:
return HttpResponseRedirect(reverse('EmployeeMain'))
return None
class RegistrationForm(forms.Form):
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
mobile = forms.CharField(max_length=14)
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
def clean_email(self):
if 'email' in self.cleaned_data:
try:
user = User.objects.get(username=self.cleaned_data['email'])
raise forms.ValidationError(_(
                u'This email address is already registered'))
except User.DoesNotExist:
pass
return self.cleaned_data['email']
def index_main(request):
    if request.user.is_authenticated():
return HttpResponseRedirect(reverse('main'))
else:
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
u = User.objects.create_user(form.cleaned_data['email'],
form.cleaned_data['email'], form.cleaned_data['password'])
profile = UserProfile()
profile.user = u
profile.userType = 1
profile.mobile = form.cleaned_data['mobile']
profile.save()
org = Organization()
org.owner = u
org.save()
return HttpResponse('Thanks')
else:
form = RegistrationForm()
return render(request, 'index_main.html', context={'form': form})
def docListOwner(request):
pass
def docUploadOwner(request):
pass
@login_required
def indexOwner(request):
context = {}
return render(request, 'ownerMain.html', context)
<|reserved_special_token_0|>
def get_universitys(request):
p = {}
for c in University.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_publishers(request):
p = {}
for c in Publisher.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_courses(request):
p = {}
for c in Course.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_topics(request):
p = {}
for c in Topic.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_0|>
def get_services(request):
p = {}
for c in Service.objects.all():
p[c.name] = c.name, c.id
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_0|>
def get_cities(request):
p = {}
for c in City.objects.all():
p[c.name] = str(c.latitude), str(c.longitude)
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DocUploadForm(forms.ModelForm):
tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
class Meta:
model = Document
exclude = ['organization', 'private_user', 'is_public',
'is_user_private', 'display']
class ShopForm(forms.Form):
shopName = forms.CharField(max_length=100)
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),
required=False)
address = forms.CharField(widget=forms.Textarea())
pincode = forms.IntegerField()
nearest_college = forms.CharField(max_length=200, required=False)
nearest_town = forms.CharField(max_length=200, required=False)
telephone = forms.CharField(max_length=14)
longitude = forms.DecimalField(max_digits=11, decimal_places=7)
latitude = forms.DecimalField(max_digits=11, decimal_places=7)
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
class ShopEditForm(forms.ModelForm):
class Meta:
model = Shop
exclude = ['latitude', 'longitude', 'is_active']
@login_required
def docUpload(request):
user = UserProfile.objects.get(user=request.user)
if request.method == 'POST':
if user.userType == 1:
org = Organization.objects.get(owner=request.user)
elif user.userType == 2:
org = Organization.objects.get(employee=request.user)
data = DocUploadForm(request.POST, request.FILES)
new_doc = data.save(commit=False)
new_doc.organization = org
new_doc.is_public = True
new_doc.save()
data.save_m2m()
if user.userType == 1:
return HttpResponseRedirect(reverse('documentListOwner'))
elif user.userType == 2:
return HttpResponseRedirect(reverse('documentListEmp'))
else:
form = DocUploadForm()
if user.userType == 1:
context = {'docUploadForm': form}
return render(request, 'printo_app/docUpload-owner.html', context)
if user.userType == 2:
shopRate = Shop.objects.get(employee=request.user).rate
context = {'docUploadForm': form, 'rate': shopRate}
return render(request, 'printo_app/docUpload-emp.html', context)
@login_required
def docList(request):
user = UserProfile.objects.get(user=request.user)
if user.userType == 1:
org = Organization.objects.get(owner=request.user)
docList = Document.objects.filter(is_public=True).filter(organization
=org)
context = {'docs': docList}
return render(request, 'printo_app/docList-owner.html', context)
elif user.userType == 2:
org = Organization.objects.get(employee=request.user)
docList = Document.objects.filter(is_public=True).filter(organization=org
).order_by('-uploadedDate')
context = {'docs': docList}
return render(request, 'printo_app/docList-emp.html', context)
@login_required
def docListOwner(request):
user = UserProfile.objects.get(user=request.user)
if user.userType == 1:
org = Organization.objects.get(owner=request.user)
docList = Document.objects.filter(is_public=True).filter(organization
=org)
context = {'docs': docList}
return render(request, 'printo_app/docList-owner.html', context)
@login_required
def docDetail(request, docid):
docDetail = Document.objects.get(id=docid)
form = DocUploadForm(instance=docDetail)
context = {'docEditForm': form, 'doc': docDetail}
return render(request, 'printo_app/docDetail.html', context)
@login_required
def docEditSave(request, docid):
currentDoc = Document.objects.get(id=docid)
docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)
docDetail.save()
context = {'msg': docDetail}
return HttpResponseRedirect(reverse('documentList'))
@login_required
def shopProfile(request, shopid=None):
context = {}
user = UserProfile.objects.get(user=request.user)
if user.userType == 1:
pass
elif user.userType == 2:
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm()
context = {'shopForm': shopForm, 'details': shop}
return render(request, 'printo_app/shopProfile.html', context)
@login_required
def shopEditSave(request):
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm(request.POST, instance=shop)
shopForm.save()
return HttpResponseRedirect(reverse('shopProfile'))
@login_required
def indexEmp(request, shopid=None):
user = UserProfile.objects.get(user=request.user)
is_owner = False
if user.userType == 1:
is_owner = True
elif user.userType == 2:
is_owner = False
context = {'is_owner': is_owner}
return HttpResponseRedirect(reverse('orderList'))
@login_required
def orderList(request, shopid=None):
shop = Shop.objects.get(employee=request.user)
orderList = Order.objects.filter(shop=shop)
new_count = orderList.filter(is_new=True).count()
pending_count = orderList.filter(is_accepted=True).count()
completed_count = orderList.filter(is_printed=True).count()
delivered_count = orderList.filter(is_delivered=True).count()
context = {'orders': orderList, 'new_count': new_count, 'pending_count':
pending_count, 'completed_count': completed_count,
'delivered_count': delivered_count}
return render(request, 'printo_app/ordersList.html', context)
@login_required
def shopList(request):
org = Organization.objects.get(owner=request.user)
shops = Shop.objects.filter(owner=org)
context = {'shops': shops}
return render(request, 'printo_app/shopList.html', context)
@login_required
def shopCreate(request):
uprofile = get_object_or_404(UserProfile, user=request.user)
if uprofile.userType == 1:
pass
else:
return HttpResponse("You don't have permission")
if request.method == 'POST':
form = ShopForm(request.POST)
if form.is_valid():
username = form.cleaned_data.get('username', None)
password = form.cleaned_data.get('password', None)
telephone = form.cleaned_data.get('telephone', None)
email = request.user.email
            if username is not None:
user = User.objects.create_user(username=username, email=
email, password=password)
userprofile = UserProfile()
userprofile.user = user
userprofile.userType = 2
                if telephone is not None:
userprofile.telephone = telephone
userprofile.save()
shopprofile = Shop()
shopprofile.employee = user
shopprofile.owner = Organization.objects.get(owner=request.user)
shopprofile.email = email
shopprofile.shopName = form.cleaned_data.get('shopName', None)
shopprofile.pincode = form.cleaned_data.get('pincode', None)
shopprofile.address = form.cleaned_data.get('address', None)
shopprofile.latitude = form.cleaned_data.get('latitude', None)
shopprofile.longitude = form.cleaned_data.get('longitude', None)
shopprofile.telephone = form.cleaned_data.get('telephone', None)
shopprofile.save()
shopprofile.services = form.cleaned_data.get('services', None)
return HttpResponseRedirect(reverse('shopList'))
else:
userform = 'this form is to be deleted'
shopform = ShopForm()
context = {'shopCreateForm': shopform, 'userForm': userform}
return render(request, 'printo_app/shopCreate.html', context)
@login_required
def index(request):
user = UserProfile.objects.get(user=request.user)
if user.userType == 1:
return HttpResponseRedirect(reverse('OwnerMain'))
elif user.userType == 2:
return HttpResponseRedirect(reverse('EmployeeMain'))
return None
class RegistrationForm(forms.Form):
email = forms.EmailField(widget=forms.TextInput(attrs={'class':
'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': 'Password'}, render_value=False), label
=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
mobile = forms.CharField(max_length=14)
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data and 'password' in self.
cleaned_data):
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(
u'You must type the same password each time'))
return self.cleaned_data
def clean_email(self):
if 'email' in self.cleaned_data:
try:
user = User.objects.get(username=self.cleaned_data['email'])
raise forms.ValidationError(_(
                u'This email address is already registered'))
except User.DoesNotExist:
pass
return self.cleaned_data['email']
def index_main(request):
    if request.user.is_authenticated():
return HttpResponseRedirect(reverse('main'))
else:
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
u = User.objects.create_user(form.cleaned_data['email'],
form.cleaned_data['email'], form.cleaned_data['password'])
profile = UserProfile()
profile.user = u
profile.userType = 1
profile.mobile = form.cleaned_data['mobile']
profile.save()
org = Organization()
org.owner = u
org.save()
return HttpResponse('Thanks')
else:
form = RegistrationForm()
return render(request, 'index_main.html', context={'form': form})
def docUploadOwner(request):
pass
@login_required
def indexOwner(request):
context = {}
return render(request, 'ownerMain.html', context)
<|reserved_special_token_0|>
def get_universitys(request):
p = {}
for c in University.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_publishers(request):
p = {}
for c in Publisher.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_courses(request):
p = {}
for c in Course.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
def get_topics(request):
p = {}
for c in Topic.objects.all():
p[c.name] = c.name, c.pk
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_0|>
def get_services(request):
p = {}
for c in Service.objects.all():
p[c.name] = c.name, c.id
return HttpResponse(json.dumps(p), content_type='application/json')
def get_colleges(request):
p = {}
for c in College.objects.all():
p[c.name] = str(c.latitude), str(c.longitude)
return HttpResponse(json.dumps(p), content_type='application/json')
def get_cities(request):
p = {}
for c in City.objects.all():
p[c.name] = str(c.latitude), str(c.longitude)
return HttpResponse(json.dumps(p), content_type='application/json')
<|reserved_special_token_1|>
from django.shortcuts import render, render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import Document, Organization, UserProfile, Shop
#from .forms import DocUploadForm, ShopEditForm
from .models import *
class DocUploadForm(forms.ModelForm):
tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
class Meta:
model = Document
# widgets = {'tags' : autocomplete_light.MultipleChoiceWidget('TagAutocomplete')}
# autocomplete_fields = ('tags','topic','university',)
exclude = ['organization','private_user','is_public','is_user_private','display']
class ShopForm(forms.Form):
shopName = forms.CharField(max_length=100)
email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
label=_(u'email address'), required=False)
address = forms.CharField(widget= forms.Textarea())
pincode = forms.IntegerField()
nearest_college = forms.CharField(max_length=200, required=False)
nearest_town = forms.CharField(max_length=200, required=False)
telephone = forms.CharField(max_length=14)
longitude = forms.DecimalField(max_digits=11, decimal_places=7)
latitude = forms.DecimalField(max_digits=11, decimal_places=7)
username = forms.CharField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'User Name'}),
label=_(u'Username'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),
label=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(u'You must type the same password each time'))
return self.cleaned_data
# def clean_email(self):
# if 'email' in self.cleaned_data:
# try:
# user = User.objects.get(username= self.cleaned_data["username"])
# raise forms.ValidationError(_(u'Already this Username is Registered'))
# except User.DoesNotExist:
# pass
# return self.cleaned_data["email"]
class ShopEditForm(forms.ModelForm):
class Meta:
model = Shop
exclude = ['latitude','longitude','is_active']
@login_required
def docUpload(request):
user = UserProfile.objects.get(user=request.user)
if(request.method=='POST'):
if(user.userType == 1 ):
org = Organization.objects.get(owner = request.user)
elif(user.userType == 2):
org = Organization.objects.get(employee = request.user)
data = DocUploadForm(request.POST,request.FILES)
new_doc = data.save(commit=False)
new_doc.organization = org
new_doc.is_public = True
new_doc.save()
data.save_m2m()
if(user.userType == 1 ):
return HttpResponseRedirect(reverse('documentListOwner'))
elif(user.userType == 2):
return HttpResponseRedirect(reverse('documentListEmp'))
else:
form = DocUploadForm()
if(user.userType == 1 ):
context = { "docUploadForm" : form}
return render(request,'printo_app/docUpload-owner.html',context)
if(user.userType == 2 ):
shopRate = Shop.objects.get(employee=request.user).rate
context = { "docUploadForm" : form,"rate":shopRate }
return render(request,'printo_app/docUpload-emp.html',context)
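
# Note: docUpload above saves the form without calling data.is_valid(), so
# invalid input raises a ValueError instead of re-displaying the form. A
# minimal defensive sketch is given below; the name docUploadChecked is an
# assumption (not part of the original module) and, for brevity, it covers
# only the owner path.
@login_required
def docUploadChecked(request):
    if request.method == 'POST':
        data = DocUploadForm(request.POST, request.FILES)
        if not data.is_valid():
            # re-render with the bound form so field errors are shown
            return render(request, 'printo_app/docUpload-owner.html',
                          {'docUploadForm': data})
        new_doc = data.save(commit=False)
        new_doc.organization = Organization.objects.get(owner=request.user)
        new_doc.is_public = True
        new_doc.save()
        data.save_m2m()
        return HttpResponseRedirect(reverse('documentListOwner'))
    return render(request, 'printo_app/docUpload-owner.html',
                  {'docUploadForm': DocUploadForm()})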
@login_required
def docList(request):
user = UserProfile.objects.get(user=request.user)
if(user.userType == 1 ):
org = Organization.objects.get(owner = request.user)
docList = Document.objects.filter(is_public=True).filter(organization=org)
context = {"docs":docList}
return render(request,'printo_app/docList-owner.html',context)
elif(user.userType == 2):
org = Organization.objects.get(employee = request.user)
docList = Document.objects.filter(is_public=True).filter(organization=org).order_by('-uploadedDate')
context = {"docs":docList}
return render(request,'printo_app/docList-emp.html',context)
@login_required
def docListOwner(request):
user = UserProfile.objects.get(user=request.user)
if(user.userType == 1 ):
org = Organization.objects.get(owner = request.user)
docList = Document.objects.filter(is_public=True).filter(organization=org)
context = {"docs":docList}
return render(request,'printo_app/docList-owner.html',context)
@login_required
def docDetail(request,docid):
docDetail = Document.objects.get(id=docid)
form = DocUploadForm(instance = docDetail)
context = {"docEditForm":form,"doc":docDetail}
return render(request,'printo_app/docDetail.html',context)
@login_required
def docEditSave(request,docid):
currentDoc = Document.objects.get(id=docid)
docDetail = DocUploadForm(request.POST,request.FILES,instance=currentDoc)
docDetail.save()
context = { "msg":docDetail }
return HttpResponseRedirect(reverse('documentList'))
@login_required
def shopProfile(request,shopid=None):
context = {}
user = UserProfile.objects.get(user=request.user)
if(user.userType == 1):
pass
elif(user.userType == 2):
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm()
context = {'shopForm':shopForm,'details':shop}
return render(request,'printo_app/shopProfile.html',context)
@login_required
def shopEditSave(request):
shop = Shop.objects.get(employee=request.user)
shopForm = ShopEditForm(request.POST,instance=shop)
shopForm.save()
return HttpResponseRedirect(reverse('shopProfile'))
@login_required
def indexEmp(request,shopid=None):
user = UserProfile.objects.get(user=request.user)
is_owner = False
if(user.userType == 1):
is_owner = True
elif(user.userType == 2):
is_owner = False
context = {'is_owner':is_owner}
return HttpResponseRedirect(reverse('orderList'))
@login_required
def orderList(request,shopid=None):
shop = Shop.objects.get(employee = request.user)
orderList = Order.objects.filter(shop=shop)
new_count = orderList.filter(is_new=True).count()
pending_count = orderList.filter(is_accepted=True).count()
completed_count = orderList.filter(is_printed=True).count()
delivered_count = orderList.filter(is_delivered=True).count()
context = {"orders":orderList,"new_count":new_count,"pending_count":pending_count,"completed_count":completed_count,"delivered_count":delivered_count}
return render(request,'printo_app/ordersList.html',context)
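
# The four separate .count() calls in orderList above hit the database four
# times. On Django 1.8+ they can be collapsed into a single aggregate query
# with conditional expressions; a sketch, with order_counts as a hypothetical
# helper name:
from django.db.models import Case, Count, IntegerField, When

def order_counts(shop):
    # one query returning all four counters for the given shop
    return Order.objects.filter(shop=shop).aggregate(
        new_count=Count(Case(When(is_new=True, then=1),
                             output_field=IntegerField())),
        pending_count=Count(Case(When(is_accepted=True, then=1),
                                 output_field=IntegerField())),
        completed_count=Count(Case(When(is_printed=True, then=1),
                                   output_field=IntegerField())),
        delivered_count=Count(Case(When(is_delivered=True, then=1),
                                   output_field=IntegerField())),
    )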
@login_required
def shopList(request):
org = Organization.objects.get(owner = request.user)
shops = Shop.objects.filter(owner = org )
context={'shops' : shops}
return render(request,'printo_app/shopList.html',context)
@login_required
def shopCreate(request):
uprofile =get_object_or_404(UserProfile, user=request.user)
if uprofile.userType==1:
pass
else:
return HttpResponse("You don't have permission")
if(request.method=='POST'):
form = ShopForm(request.POST)
if(form.is_valid()):
username = form.cleaned_data.get("username", None)
password = form.cleaned_data.get("password", None)
telephone = form.cleaned_data.get("telephone", None)
email = request.user.email
# email = form.cleaned_data.get("email", None)
# if email == None:
# email = request.user.email
            if username is not None:
user = User.objects.create_user(username=username,email=email, password=password)
userprofile = UserProfile()
userprofile.user = user
userprofile.userType = 2
                if telephone is not None:
userprofile.telephone = telephone
userprofile.save()
# shop = Shop()
shopprofile = Shop()
shopprofile.employee = user
shopprofile.owner = Organization.objects.get(owner = request.user)
shopprofile.email = email
shopprofile.shopName = form.cleaned_data.get("shopName", None)
shopprofile.pincode = form.cleaned_data.get("pincode",None)
shopprofile.address = form.cleaned_data.get("address",None)
shopprofile.latitude = form.cleaned_data.get("latitude",None)
shopprofile.longitude = form.cleaned_data.get("longitude",None)
shopprofile.telephone = form.cleaned_data.get("telephone",None)
shopprofile.save()
shopprofile.services = form.cleaned_data.get("services",None)
# shop.save_m2m()
return HttpResponseRedirect(reverse('shopList'))
else:
userform = 'this form is to be deleted'
shopform = ShopForm()
context = { 'shopCreateForm' : shopform, 'userForm' : userform }
return render(request,'printo_app/shopCreate.html',context)
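
# shopCreate above performs several dependent writes (User, UserProfile,
# Shop); if a later save fails, a half-created account is left behind. A
# sketch of grouping the writes in one transaction -- _create_shop_account is
# a hypothetical helper, assuming a transactional database backend:
from django.db import transaction

@transaction.atomic
def _create_shop_account(form, owner_user):
    # every save below commits together or rolls back together
    user = User.objects.create_user(username=form.cleaned_data['username'],
                                    email=owner_user.email,
                                    password=form.cleaned_data['password'])
    profile = UserProfile(user=user, userType=2,
                          telephone=form.cleaned_data.get('telephone'))
    profile.save()
    shop = Shop(employee=user,
                owner=Organization.objects.get(owner=owner_user),
                email=owner_user.email,
                shopName=form.cleaned_data['shopName'],
                pincode=form.cleaned_data['pincode'],
                address=form.cleaned_data['address'],
                latitude=form.cleaned_data['latitude'],
                longitude=form.cleaned_data['longitude'],
                telephone=form.cleaned_data['telephone'])
    shop.save()
    shop.services = form.cleaned_data['services']
    return shop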
@login_required
def index(request):
user = UserProfile.objects.get(user=request.user)
if(user.userType == 1):
return HttpResponseRedirect(reverse('OwnerMain'))
elif(user.userType == 2):
return HttpResponseRedirect(reverse('EmployeeMain'))
return None
class RegistrationForm(forms.Form):
email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
label=_(u'email address'))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),
label=_(u'Password'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),
label=_(u'Password Again'))
mobile = forms.CharField(max_length=14)
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password']:
raise forms.ValidationError(_(u'You must type the same password each time'))
return self.cleaned_data
def clean_email(self):
if 'email' in self.cleaned_data:
try:
user = User.objects.get(username= self.cleaned_data["email"])
                raise forms.ValidationError(_(u'This email address is already registered'))
except User.DoesNotExist:
pass
return self.cleaned_data["email"]
def index_main(request):
    if request.user.is_authenticated():
return HttpResponseRedirect(reverse("main"))
else:
if request.method=="POST":
form= RegistrationForm(request.POST)
if form.is_valid():
u = User.objects.create_user(form.cleaned_data["email"], form.cleaned_data["email"], form.cleaned_data["password"],)
# Send a mail with verification code
profile = UserProfile()
profile.user =u
profile.userType =1
profile.mobile = form.cleaned_data["mobile"]
profile.save()
org= Organization()
org.owner = u
org.save()
return HttpResponse("Thanks")
else:
form =RegistrationForm()
return render( request, 'index_main.html', context={"form":form},)
def docUploadOwner(request):
pass
@login_required
def indexOwner(request):
context = {}
return render(request,'ownerMain.html',context)
# ====================================
# DATA PROVIDERS
# ====================================
import json
from django.core import serializers
def get_universitys(request):
p={}
for c in University.objects.all():
p[c.name] = (c.name,c.pk)
return HttpResponse(json.dumps(p), content_type="application/json")
def get_publishers(request):
p={}
for c in Publisher.objects.all():
p[c.name] = (c.name,c.pk)
return HttpResponse(json.dumps(p), content_type="application/json")
def get_courses(request):
p={}
for c in Course.objects.all():
p[c.name] = (c.name,c.pk)
return HttpResponse(json.dumps(p), content_type="application/json")
def get_topics(request):
p={}
for c in Topic.objects.all():
p[c.name] = (c.name,c.pk)
return HttpResponse(json.dumps(p), content_type="application/json")
def get_tags(request):
p={}
for c in Tag.objects.all():
p[c.name] = (c.name,c.id)
return HttpResponse(json.dumps(p), content_type="application/json")
def get_services(request):
p={}
for c in Service.objects.all():
p[c.name] = (c.name,c.id)
return HttpResponse(json.dumps(p), content_type="application/json")
def get_colleges(request):
p={}
for c in College.objects.all():
p[c.name] =(str(c.latitude), str(c.longitude))
return HttpResponse(json.dumps(p), content_type="application/json")
def get_cities(request):
p={}
for c in City.objects.all():
p[c.name] =(str(c.latitude), str(c.longitude))
return HttpResponse(json.dumps(p), content_type="application/json")
| flexible |
{
"blob_id": "d2c5d306591216e100b5bd8e8822b24fd137d092",
"index": 9208,
"step-1": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n<mask token>\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = 
Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n<mask token>\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n<mask token>\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n<mask token>\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = 
forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-2": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n<mask token>\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = 
Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n<mask token>\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n<mask token>\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n<mask 
token>\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-3": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n@login_required\ndef indexEmp(request):\n context = {'shop': shopid}\n return render(request, 'index.html', context)\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org 
= Organization.objects.get(employee=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n@login_required\ndef shopProfile(request, shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n pass\n elif user.userType == 2:\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm': shopForm, 'details': shop}\n return render(request, 'printo_app/shopProfile.html', context)\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n@login_required\ndef indexEmp(request, shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if user.userType == 1:\n is_owner = True\n elif user.userType == 2:\n is_owner = False\n context = {'is_owner': is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = 
form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\ndef docUploadOwner(request):\n pass\n\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request, 'ownerMain.html', context)\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_courses(request):\n p = 
{}\n for c in Course.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_services(request):\n p = {}\n for c in Service.objects.all():\n p[c.name] = c.name, c.id\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-4": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n@login_required\ndef indexEmp(request):\n context = {'shop': shopid}\n return render(request, 'index.html', context)\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org 
= Organization.objects.get(employee=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n@login_required\ndef docListOwner(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n@login_required\ndef shopProfile(request, shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n pass\n elif user.userType == 2:\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm': shopForm, 'details': shop}\n return render(request, 'printo_app/shopProfile.html', context)\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n@login_required\ndef indexEmp(request, shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if user.userType == 1:\n is_owner = True\n elif user.userType == 2:\n is_owner = False\n context = {'is_owner': is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = 
UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\ndef docUploadOwner(request):\n pass\n\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request, 'ownerMain.html', context)\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_courses(request):\n p = {}\n for c in Course.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_services(request):\n p = {}\n for c in Service.objects.all():\n p[c.name] = c.name, c.id\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_colleges(request):\n p = {}\n for c in College.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-5": "from django.shortcuts import render\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom .models import Document, Organization, UserProfile, Shop\n#from .forms import DocUploadForm, ShopEditForm\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.shortcuts import get_object_or_404\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom .models import *\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n class Meta:\n model = Document\n # widgets = {'tags' : autocomplete_light.MultipleChoiceWidget('TagAutocomplete')}\n # autocomplete_fields = ('tags','topic','university',)\n exclude = ['organization','private_user','is_public','is_user_private','display']\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),\n label=_(u'email address'), required=False)\n \n address = forms.CharField(widget= forms.Textarea())\n pincode = forms.IntegerField()\n \n nearest_college = forms.CharField(max_length=200, required=False)\n \n nearest_town = forms.CharField(max_length=200, required=False)\n \n telephone = forms.CharField(max_length=14)\n \n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'User Name'}),\n label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),\n label=_(u'Password'))\n \n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n \n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data \n\n\n # def clean_email(self):\n # if 'email' in self.cleaned_data:\n\n # try:\n # user = User.objects.get(username= self.cleaned_data[\"username\"])\n # raise forms.ValidationError(_(u'Already this Username is Registered'))\n \n # except User.DoesNotExist:\n \n # pass\n # return self.cleaned_data[\"email\"]\n\nclass ShopEditForm(forms.ModelForm):\n class Meta:\n model = Shop\n exclude = ['latitude','longitude','is_active']\n\n@login_required\ndef indexEmp(request):\n context = {'shop':shopid}\n return render(request,'index.html',context)\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if(request.method=='POST'):\n # import ipdb; ipdb.set_trace();\n \n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n elif(user.userType == 2):\n org = Organization.objects.get(employee = request.user)\n\n data = DocUploadForm(request.POST,request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m() \n if(user.userType == 1 ):\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif(user.userType == 2):\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if(user.userType == 1 ):\n context = { \"docUploadForm\" : form}\n return render(request,'printo_app/docUpload-owner.html',context)\n if(user.userType == 2 ):\n shopRate = Shop.objects.get(employee=request.user).rate\n context = { \"docUploadForm\" : form,\"rate\":shopRate }\n return render(request,'printo_app/docUpload-emp.html',context)\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org)\n context = {\"docs\":docList}\n return render(request,'printo_app/docList-owner.html',context)\n elif(user.userType == 2):\n org = Organization.objects.get(employee = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org).order_by('-uploadedDate')\n \n context = {\"docs\":docList}\n return render(request,'printo_app/docList-emp.html',context)\n\n@login_required\ndef docListOwner(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org)\n context = {\"docs\":docList}\n return render(request,'printo_app/docList-owner.html',context)\n\n@login_required\ndef docDetail(request,docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance = docDetail)\n context = {\"docEditForm\":form,\"doc\":docDetail}\n return render(request,'printo_app/docDetail.html',context)\n\n@login_required\ndef docEditSave(request,docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST,request.FILES,instance=currentDoc)\n docDetail.save() \n context = { \"msg\":docDetail }\n return HttpResponseRedirect(reverse('documentList'))\n\n@login_required\ndef 
shopProfile(request,shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1):\n pass\n elif(user.userType == 2):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm':shopForm,'details':shop}\n return render(request,'printo_app/shopProfile.html',context)\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST,instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n@login_required\ndef indexEmp(request,shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if(user.userType == 1):\n is_owner = True\n elif(user.userType == 2):\n is_owner = False\n context = {'is_owner':is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n@login_required\ndef orderList(request,shopid=None):\n shop = Shop.objects.get(employee = request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {\"orders\":orderList,\"new_count\":new_count,\"pending_count\":pending_count,\"completed_count\":completed_count,\"delivered_count\":delivered_count}\n return render(request,'printo_app/ordersList.html',context)\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner = request.user)\n shops = Shop.objects.filter(owner = org )\n context={'shops' : shops}\n return render(request,'printo_app/shopList.html',context)\n\n@login_required\ndef shopCreate(request):\n uprofile =get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType==1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n \n if(request.method=='POST'):\n form = ShopForm(request.POST)\n import ipdb; ipdb.set_trace()\n if(form.is_valid()):\n username = form.cleaned_data.get(\"username\", None)\n password = form.cleaned_data.get(\"password\", None)\n telephone = form.cleaned_data.get(\"telephone\", None)\n email = request.user.email\n # email = form.cleaned_data.get(\"email\", None)\n # if email == None:\n # email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username,email=email, password=password)\n \n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone !=None:\n userprofile.telephone = telephone \n userprofile.save()\n \n # shop = Shop()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner = request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get(\"shopName\", None)\n shopprofile.pincode = form.cleaned_data.get(\"pincode\",None)\n shopprofile.address = form.cleaned_data.get(\"address\",None)\n shopprofile.latitude = form.cleaned_data.get(\"latitude\",None)\n shopprofile.longitude = form.cleaned_data.get(\"longitude\",None)\n shopprofile.telephone = form.cleaned_data.get(\"telephone\",None)\n \n shopprofile.save()\n shopprofile.services = form.cleaned_data.get(\"services\",None)\n # shop.save_m2m()\n\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n\n shopform = ShopForm()\n context = { 'shopCreateForm' : shopform, 'userForm' : userform }\n return 
render(request,'printo_app/shopCreate.html',context)\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1):\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif(user.userType == 2):\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\nclass RegistrationForm(forms.Form):\n \n \n \n email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),\n label=_(u'email address'))\n \n password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),\n label=_(u'Password'))\n \n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n \n mobile = forms.CharField(max_length=14)\n \n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data \n\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n \n try:\n user = User.objects.get(username= self.cleaned_data[\"email\"])\n raise forms.ValidationError(_(u'Already Email Address is registered'))\n \n except User.DoesNotExist:\n pass\n return self.cleaned_data[\"email\"]\n\ndef index_main(request):\n if request.user.is_authenticated()==True:\n return HttpResponseRedirect(reverse(\"main\"))\n else:\n if request.method==\"POST\":\n form= RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data[\"email\"], form.cleaned_data[\"email\"], form.cleaned_data[\"password\"],)\n # Send a mail with verification code\n profile = UserProfile()\n profile.user =u\n profile.userType =1\n profile.mobile = form.cleaned_data[\"mobile\"]\n profile.save()\n \n org= Organization()\n org.owner = u\n org.save()\n return HttpResponse(\"Thanks\") \n else:\n form =RegistrationForm()\n return render( request, 'index_main.html', context={\"form\":form},)\n\n \ndef docListOwner(request):\n pass\ndef docUploadOwner(request):\n pass\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request,'ownerMain.html',context)\n\n# ====================================\n# DATA PROVIDERS\n# ====================================\nimport json\nfrom django.core import serializers\n\ndef get_universitys(request):\n p={}\n # import ipdb; ipdb.set_trace()\n for c in University.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_publishers(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Publisher.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_courses(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Course.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_topics(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Topic.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_tags(request):\n 
p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Tag.objects.all():\n p[c.name] = (c.name,c.id)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_services(request):\n p={}\n # import ipdb; ipdb.set_trace()\n for c in Service.objects.all():\n p[c.name] = (c.name,c.id)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_colleges(request):\n p={}\n for c in College.objects.all():\n p[c.name] =(str(c.latitude), str(c.longitude))\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_cities(request):\n p={}\n for c in City.objects.all():\n p[c.name] =(str(c.latitude), str(c.longitude))\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n",
"step-ids": [
23,
24,
32,
34,
37
]
}
|
[
23,
24,
32,
34,
37
] |
def interseccao_chaves(lis_dic):
lista = []
for dic1 in lis_dic[0]:
for cahves in dic1:
lista.append(dic1)
for dic2 in lis_dic[1]:
for cahves in dic2:
lista.append(dic2)
return lista
|
normal
|
{
"blob_id": "f3ff453655d7938cb417ce212f3836fabafaea43",
"index": 1696,
"step-1": "<mask token>\n",
"step-2": "def interseccao_chaves(lis_dic):\n lista = []\n for dic1 in lis_dic[0]:\n for cahves in dic1:\n lista.append(dic1)\n for dic2 in lis_dic[1]:\n for cahves in dic2:\n lista.append(dic2)\n return lista\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(cv2.__version__)
<|reserved_special_token_0|>
print(image)
print(image.shape)
print(image[0])
print('~~~~~~~~~~~~~~~')
print(image.shape[0])
print('~~~~~~~~~~~~~~~')
print(len(image))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(cv2.__version__)
image = cv2.imread('download.jpeg', 1)
print(image)
print(image.shape)
print(image[0])
print('~~~~~~~~~~~~~~~')
print(image.shape[0])
print('~~~~~~~~~~~~~~~')
print(len(image))
<|reserved_special_token_1|>
import cv2
print(cv2.__version__)
image = cv2.imread('download.jpeg', 1)
print(image)
print(image.shape)
print(image[0])
print('~~~~~~~~~~~~~~~')
print(image.shape[0])
print('~~~~~~~~~~~~~~~')
print(len(image))
<|reserved_special_token_1|>
import cv2
print(cv2.__version__)
image = cv2.imread("download.jpeg", 1)
print(image)
print(image.shape)
print(image[0])
print("~~~~~~~~~~~~~~~")
print(image.shape[0])
print("~~~~~~~~~~~~~~~")
print(len(image))
|
flexible
|
{
"blob_id": "0b0ae6101fd80bdbcf37b935268f3e49230599fb",
"index": 5715,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(cv2.__version__)\n<mask token>\nprint(image)\nprint(image.shape)\nprint(image[0])\nprint('~~~~~~~~~~~~~~~')\nprint(image.shape[0])\nprint('~~~~~~~~~~~~~~~')\nprint(len(image))\n",
"step-3": "<mask token>\nprint(cv2.__version__)\nimage = cv2.imread('download.jpeg', 1)\nprint(image)\nprint(image.shape)\nprint(image[0])\nprint('~~~~~~~~~~~~~~~')\nprint(image.shape[0])\nprint('~~~~~~~~~~~~~~~')\nprint(len(image))\n",
"step-4": "import cv2\nprint(cv2.__version__)\nimage = cv2.imread('download.jpeg', 1)\nprint(image)\nprint(image.shape)\nprint(image[0])\nprint('~~~~~~~~~~~~~~~')\nprint(image.shape[0])\nprint('~~~~~~~~~~~~~~~')\nprint(len(image))\n",
"step-5": "import cv2\nprint(cv2.__version__)\n\nimage = cv2.imread(\"download.jpeg\", 1)\nprint(image)\nprint(image.shape)\n\nprint(image[0])\nprint(\"~~~~~~~~~~~~~~~\")\nprint(image.shape[0])\nprint(\"~~~~~~~~~~~~~~~\")\nprint(len(image))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def load_a_affirm_target(gfe_path=GFE_PATH):
csv_targeta = os.path.join(gfe_path, 'a_affirmative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeta)
def load_a_cond_data(gfe_path=GFE_PATH):
csv_pathc = os.path.join(gfe_path, 'a_conditional_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathc)
def load_a_cond_target(gfe_path=GFE_PATH):
csv_targetc = os.path.join(gfe_path, 'a_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetc)
<|reserved_special_token_0|>
def load_a_emphasis_target(gfe_path=GFE_PATH):
csv_targete = os.path.join(gfe_path, 'a_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targete)
def load_a_neg_data(gfe_path=GFE_PATH):
csv_pathn = os.path.join(gfe_path, 'a_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathn)
def load_a_neg_target(gfe_path=GFE_PATH):
csv_targetn = os.path.join(gfe_path, 'a_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetn)
def load_a_rel_data(gfe_path=GFE_PATH):
csv_pathr = os.path.join(gfe_path, 'a_relative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathr)
<|reserved_special_token_0|>
def load_a_topics_data(gfe_path=GFE_PATH):
csv_patht = os.path.join(gfe_path, 'a_topics_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patht)
<|reserved_special_token_0|>
def load_a_wh_target(gfe_path=GFE_PATH):
csv_targetw = os.path.join(gfe_path, 'a_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetw)
<|reserved_special_token_0|>
def load_b_affirm_data(gfe_path=GFE_PATH):
csv_pathab = os.path.join(gfe_path, 'b_affirmative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathab)
<|reserved_special_token_0|>
def load_b_cond_target(gfe_path=GFE_PATH):
csv_targetcb = os.path.join(gfe_path, 'b_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetcb)
def load_b_doubtq_data(gfe_path=GFE_PATH):
csv_pathdb = os.path.join(gfe_path, 'b_doubt_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathdb)
def load_b_doubtq_target(gfe_path=GFE_PATH):
csv_targetdb = os.path.join(gfe_path, 'b_doubt_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetdb)
<|reserved_special_token_0|>
def load_b_emphasis_target(gfe_path=GFE_PATH):
csv_targeteb = os.path.join(gfe_path, 'b_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeteb)
def load_b_neg_data(gfe_path=GFE_PATH):
csv_pathnb = os.path.join(gfe_path, 'b_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathnb)
def load_b_neg_target(gfe_path=GFE_PATH):
csv_targetnb = os.path.join(gfe_path, 'b_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetnb)
<|reserved_special_token_0|>
def load_b_wh_target(gfe_path=GFE_PATH):
csv_targetwb = os.path.join(gfe_path, 'b_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetwb)
def load_b_yn_data(gfe_path=GFE_PATH):
csv_pathyb = os.path.join(gfe_path, 'b_yn_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathyb)
def load_b_yn_target(gfe_path=GFE_PATH):
csv_targetyb = os.path.join(gfe_path, 'b_yn_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetyb)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_a_affirm_data(gfe_path=GFE_PATH):
csv_patha = os.path.join(gfe_path, 'a_affirmative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patha)
def load_a_affirm_target(gfe_path=GFE_PATH):
csv_targeta = os.path.join(gfe_path, 'a_affirmative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeta)
def load_a_cond_data(gfe_path=GFE_PATH):
csv_pathc = os.path.join(gfe_path, 'a_conditional_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathc)
def load_a_cond_target(gfe_path=GFE_PATH):
csv_targetc = os.path.join(gfe_path, 'a_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetc)
<|reserved_special_token_0|>
def load_a_emphasis_target(gfe_path=GFE_PATH):
csv_targete = os.path.join(gfe_path, 'a_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targete)
def load_a_neg_data(gfe_path=GFE_PATH):
csv_pathn = os.path.join(gfe_path, 'a_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathn)
def load_a_neg_target(gfe_path=GFE_PATH):
csv_targetn = os.path.join(gfe_path, 'a_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetn)
def load_a_rel_data(gfe_path=GFE_PATH):
csv_pathr = os.path.join(gfe_path, 'a_relative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathr)
def load_a_rel_target(gfe_path=GFE_PATH):
csv_targetr = os.path.join(gfe_path, 'a_relative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetr)
def load_a_topics_data(gfe_path=GFE_PATH):
csv_patht = os.path.join(gfe_path, 'a_topics_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patht)
<|reserved_special_token_0|>
def load_a_wh_target(gfe_path=GFE_PATH):
csv_targetw = os.path.join(gfe_path, 'a_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetw)
def load_a_yn_data(gfe_path=GFE_PATH):
csv_pathy = os.path.join(gfe_path, 'a_yn_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathy)
def load_a_yn_target(gfe_path=GFE_PATH):
csv_targety = os.path.join(gfe_path, 'a_yn_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targety)
def load_b_affirm_data(gfe_path=GFE_PATH):
csv_pathab = os.path.join(gfe_path, 'b_affirmative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathab)
<|reserved_special_token_0|>
def load_b_cond_data(gfe_path=GFE_PATH):
csv_pathcb = os.path.join(gfe_path, 'b_conditional_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathcb)
def load_b_cond_target(gfe_path=GFE_PATH):
csv_targetcb = os.path.join(gfe_path, 'b_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetcb)
def load_b_doubtq_data(gfe_path=GFE_PATH):
csv_pathdb = os.path.join(gfe_path, 'b_doubt_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathdb)
def load_b_doubtq_target(gfe_path=GFE_PATH):
csv_targetdb = os.path.join(gfe_path, 'b_doubt_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetdb)
def load_b_emphasis_data(gfe_path=GFE_PATH):
csv_patheb = os.path.join(gfe_path, 'b_emphasis_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patheb)
def load_b_emphasis_target(gfe_path=GFE_PATH):
csv_targeteb = os.path.join(gfe_path, 'b_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeteb)
def load_b_neg_data(gfe_path=GFE_PATH):
csv_pathnb = os.path.join(gfe_path, 'b_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathnb)
def load_b_neg_target(gfe_path=GFE_PATH):
csv_targetnb = os.path.join(gfe_path, 'b_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetnb)
def load_b_rel_data(gfe_path=GFE_PATH):
csv_pathrb = os.path.join(gfe_path, 'b_relative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathrb)
<|reserved_special_token_0|>
def load_b_topics_data(gfe_path=GFE_PATH):
csv_pathtb = os.path.join(gfe_path, 'b_topics_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathtb)
<|reserved_special_token_0|>
def load_b_wh_target(gfe_path=GFE_PATH):
csv_targetwb = os.path.join(gfe_path, 'b_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetwb)
def load_b_yn_data(gfe_path=GFE_PATH):
csv_pathyb = os.path.join(gfe_path, 'b_yn_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathyb)
def load_b_yn_target(gfe_path=GFE_PATH):
csv_targetyb = os.path.join(gfe_path, 'b_yn_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetyb)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_a_affirm_data(gfe_path=GFE_PATH):
csv_patha = os.path.join(gfe_path, 'a_affirmative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patha)
def load_a_affirm_target(gfe_path=GFE_PATH):
csv_targeta = os.path.join(gfe_path, 'a_affirmative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeta)
def load_a_cond_data(gfe_path=GFE_PATH):
csv_pathc = os.path.join(gfe_path, 'a_conditional_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathc)
def load_a_cond_target(gfe_path=GFE_PATH):
csv_targetc = os.path.join(gfe_path, 'a_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetc)
def load_a_doubtq_data(gfe_path=GFE_PATH):
csv_pathd = os.path.join(gfe_path, 'a_doubt_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathd)
<|reserved_special_token_0|>
def load_a_emphasis_target(gfe_path=GFE_PATH):
csv_targete = os.path.join(gfe_path, 'a_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targete)
def load_a_neg_data(gfe_path=GFE_PATH):
csv_pathn = os.path.join(gfe_path, 'a_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathn)
def load_a_neg_target(gfe_path=GFE_PATH):
csv_targetn = os.path.join(gfe_path, 'a_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetn)
def load_a_rel_data(gfe_path=GFE_PATH):
csv_pathr = os.path.join(gfe_path, 'a_relative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathr)
def load_a_rel_target(gfe_path=GFE_PATH):
csv_targetr = os.path.join(gfe_path, 'a_relative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetr)
def load_a_topics_data(gfe_path=GFE_PATH):
csv_patht = os.path.join(gfe_path, 'a_topics_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patht)
<|reserved_special_token_0|>
def load_a_wh_target(gfe_path=GFE_PATH):
csv_targetw = os.path.join(gfe_path, 'a_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetw)
def load_a_yn_data(gfe_path=GFE_PATH):
csv_pathy = os.path.join(gfe_path, 'a_yn_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathy)
def load_a_yn_target(gfe_path=GFE_PATH):
csv_targety = os.path.join(gfe_path, 'a_yn_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targety)
def load_b_affirm_data(gfe_path=GFE_PATH):
csv_pathab = os.path.join(gfe_path, 'b_affirmative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathab)
<|reserved_special_token_0|>
def load_b_cond_data(gfe_path=GFE_PATH):
csv_pathcb = os.path.join(gfe_path, 'b_conditional_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathcb)
def load_b_cond_target(gfe_path=GFE_PATH):
csv_targetcb = os.path.join(gfe_path, 'b_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetcb)
def load_b_doubtq_data(gfe_path=GFE_PATH):
csv_pathdb = os.path.join(gfe_path, 'b_doubt_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathdb)
def load_b_doubtq_target(gfe_path=GFE_PATH):
csv_targetdb = os.path.join(gfe_path, 'b_doubt_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetdb)
def load_b_emphasis_data(gfe_path=GFE_PATH):
csv_patheb = os.path.join(gfe_path, 'b_emphasis_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patheb)
def load_b_emphasis_target(gfe_path=GFE_PATH):
csv_targeteb = os.path.join(gfe_path, 'b_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeteb)
def load_b_neg_data(gfe_path=GFE_PATH):
csv_pathnb = os.path.join(gfe_path, 'b_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathnb)
def load_b_neg_target(gfe_path=GFE_PATH):
csv_targetnb = os.path.join(gfe_path, 'b_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetnb)
def load_b_rel_data(gfe_path=GFE_PATH):
csv_pathrb = os.path.join(gfe_path, 'b_relative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathrb)
def load_b_rel_target(gfe_path=GFE_PATH):
csv_targetrb = os.path.join(gfe_path, 'b_relative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetrb)
def load_b_topics_data(gfe_path=GFE_PATH):
csv_pathtb = os.path.join(gfe_path, 'b_topics_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathtb)
<|reserved_special_token_0|>
def load_b_wh_target(gfe_path=GFE_PATH):
csv_targetwb = os.path.join(gfe_path, 'b_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetwb)
def load_b_yn_data(gfe_path=GFE_PATH):
csv_pathyb = os.path.join(gfe_path, 'b_yn_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathyb)
def load_b_yn_target(gfe_path=GFE_PATH):
csv_targetyb = os.path.join(gfe_path, 'b_yn_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetyb)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_a_affirm_data(gfe_path=GFE_PATH):
csv_patha = os.path.join(gfe_path, 'a_affirmative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patha)
def load_a_affirm_target(gfe_path=GFE_PATH):
csv_targeta = os.path.join(gfe_path, 'a_affirmative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeta)
def load_a_cond_data(gfe_path=GFE_PATH):
csv_pathc = os.path.join(gfe_path, 'a_conditional_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathc)
def load_a_cond_target(gfe_path=GFE_PATH):
csv_targetc = os.path.join(gfe_path, 'a_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetc)
def load_a_doubtq_data(gfe_path=GFE_PATH):
csv_pathd = os.path.join(gfe_path, 'a_doubt_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathd)
<|reserved_special_token_0|>
def load_a_emphasis_data(gfe_path=GFE_PATH):
csv_pathe = os.path.join(gfe_path, 'a_emphasis_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathe)
def load_a_emphasis_target(gfe_path=GFE_PATH):
csv_targete = os.path.join(gfe_path, 'a_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targete)
def load_a_neg_data(gfe_path=GFE_PATH):
csv_pathn = os.path.join(gfe_path, 'a_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathn)
def load_a_neg_target(gfe_path=GFE_PATH):
csv_targetn = os.path.join(gfe_path, 'a_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetn)
def load_a_rel_data(gfe_path=GFE_PATH):
csv_pathr = os.path.join(gfe_path, 'a_relative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathr)
def load_a_rel_target(gfe_path=GFE_PATH):
csv_targetr = os.path.join(gfe_path, 'a_relative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetr)
def load_a_topics_data(gfe_path=GFE_PATH):
csv_patht = os.path.join(gfe_path, 'a_topics_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patht)
<|reserved_special_token_0|>
def load_a_wh_target(gfe_path=GFE_PATH):
csv_targetw = os.path.join(gfe_path, 'a_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetw)
def load_a_yn_data(gfe_path=GFE_PATH):
csv_pathy = os.path.join(gfe_path, 'a_yn_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathy)
def load_a_yn_target(gfe_path=GFE_PATH):
csv_targety = os.path.join(gfe_path, 'a_yn_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targety)
def load_b_affirm_data(gfe_path=GFE_PATH):
csv_pathab = os.path.join(gfe_path, 'b_affirmative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathab)
<|reserved_special_token_0|>
def load_b_cond_data(gfe_path=GFE_PATH):
csv_pathcb = os.path.join(gfe_path, 'b_conditional_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathcb)
def load_b_cond_target(gfe_path=GFE_PATH):
csv_targetcb = os.path.join(gfe_path, 'b_conditional_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetcb)
def load_b_doubtq_data(gfe_path=GFE_PATH):
csv_pathdb = os.path.join(gfe_path, 'b_doubt_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathdb)
def load_b_doubtq_target(gfe_path=GFE_PATH):
csv_targetdb = os.path.join(gfe_path, 'b_doubt_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetdb)
def load_b_emphasis_data(gfe_path=GFE_PATH):
csv_patheb = os.path.join(gfe_path, 'b_emphasis_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_patheb)
def load_b_emphasis_target(gfe_path=GFE_PATH):
csv_targeteb = os.path.join(gfe_path, 'b_emphasis_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targeteb)
def load_b_neg_data(gfe_path=GFE_PATH):
csv_pathnb = os.path.join(gfe_path, 'b_negative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathnb)
def load_b_neg_target(gfe_path=GFE_PATH):
csv_targetnb = os.path.join(gfe_path, 'b_negative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetnb)
def load_b_rel_data(gfe_path=GFE_PATH):
csv_pathrb = os.path.join(gfe_path, 'b_relative_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathrb)
def load_b_rel_target(gfe_path=GFE_PATH):
csv_targetrb = os.path.join(gfe_path, 'b_relative_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetrb)
def load_b_topics_data(gfe_path=GFE_PATH):
csv_pathtb = os.path.join(gfe_path, 'b_topics_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathtb)
<|reserved_special_token_0|>
def load_b_wh_target(gfe_path=GFE_PATH):
csv_targetwb = os.path.join(gfe_path, 'b_wh_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetwb)
def load_b_yn_data(gfe_path=GFE_PATH):
csv_pathyb = os.path.join(gfe_path, 'b_yn_question_datapoints.csv')
print(gfe_path)
return pd.read_csv(csv_pathyb)
def load_b_yn_target(gfe_path=GFE_PATH):
csv_targetyb = os.path.join(gfe_path, 'b_yn_question_targets.csv')
print(gfe_path)
return pd.read_csv(csv_targetyb)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
import os
GFE_PATH = "C:\Haely\MS2017\sem2\EE 259\Project\grammatical_facial_expression"
def load_a_affirm_data(gfe_path=GFE_PATH):
csv_patha = os.path.join(gfe_path, "a_affirmative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_patha)
def load_a_affirm_target(gfe_path=GFE_PATH):
csv_targeta = os.path.join(gfe_path, "a_affirmative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targeta)
def load_a_cond_data(gfe_path=GFE_PATH):
csv_pathc = os.path.join(gfe_path, "a_conditional_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathc)
def load_a_cond_target(gfe_path=GFE_PATH):
csv_targetc = os.path.join(gfe_path, "a_conditional_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetc)
def load_a_doubtq_data(gfe_path=GFE_PATH):
csv_pathd = os.path.join(gfe_path, "a_doubt_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathd)
def load_a_doubtq_target(gfe_path=GFE_PATH):
    csv_targetd = os.path.join(gfe_path, "a_doubts_question_targets.csv")  # note: 'doubts' differs from the 'doubt_question' pattern used elsewhere; kept as-is in case it matches the actual filename
print(gfe_path)
return pd.read_csv(csv_targetd)
def load_a_emphasis_data(gfe_path=GFE_PATH):
csv_pathe = os.path.join(gfe_path, "a_emphasis_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathe)
def load_a_emphasis_target(gfe_path=GFE_PATH):
csv_targete = os.path.join(gfe_path, "a_emphasis_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targete)
def load_a_neg_data(gfe_path=GFE_PATH):
csv_pathn = os.path.join(gfe_path, "a_negative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathn)
def load_a_neg_target(gfe_path=GFE_PATH):
csv_targetn = os.path.join(gfe_path, "a_negative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetn)
def load_a_rel_data(gfe_path=GFE_PATH):
csv_pathr = os.path.join(gfe_path, "a_relative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathr)
def load_a_rel_target(gfe_path=GFE_PATH):
csv_targetr = os.path.join(gfe_path, "a_relative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetr)
def load_a_topics_data(gfe_path=GFE_PATH):
csv_patht = os.path.join(gfe_path, "a_topics_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_patht)
def load_a_topics_target(gfe_path=GFE_PATH):
csv_targett = os.path.join(gfe_path, "a_topics_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targett)
def load_a_wh_data(gfe_path=GFE_PATH):
csv_pathw = os.path.join(gfe_path, "a_wh_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathw)
def load_a_wh_target(gfe_path=GFE_PATH):
csv_targetw = os.path.join(gfe_path, "a_wh_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetw)
def load_a_yn_data(gfe_path=GFE_PATH):
csv_pathy = os.path.join(gfe_path, "a_yn_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathy)
def load_a_yn_target(gfe_path=GFE_PATH):
csv_targety = os.path.join(gfe_path, "a_yn_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targety)
# In[3]:
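# The same set of loaders, repeated for user B's recordings.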
def load_b_affirm_data(gfe_path=GFE_PATH):
csv_pathab = os.path.join(gfe_path, "b_affirmative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathab)
def load_b_affirm_target(gfe_path=GFE_PATH):
csv_targetab = os.path.join(gfe_path, "b_affirmative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetab)
def load_b_cond_data(gfe_path=GFE_PATH):
csv_pathcb = os.path.join(gfe_path, "b_conditional_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathcb)
def load_b_cond_target(gfe_path=GFE_PATH):
csv_targetcb = os.path.join(gfe_path, "b_conditional_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetcb)
def load_b_doubtq_data(gfe_path=GFE_PATH):
csv_pathdb = os.path.join(gfe_path, "b_doubt_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathdb)
def load_b_doubtq_target(gfe_path=GFE_PATH):
csv_targetdb = os.path.join(gfe_path, "b_doubt_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetdb)
def load_b_emphasis_data(gfe_path=GFE_PATH):
csv_patheb = os.path.join(gfe_path, "b_emphasis_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_patheb)
def load_b_emphasis_target(gfe_path=GFE_PATH):
csv_targeteb = os.path.join(gfe_path, "b_emphasis_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targeteb)
def load_b_neg_data(gfe_path=GFE_PATH):
csv_pathnb = os.path.join(gfe_path, "b_negative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathnb)
def load_b_neg_target(gfe_path=GFE_PATH):
csv_targetnb = os.path.join(gfe_path, "b_negative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetnb)
def load_b_rel_data(gfe_path=GFE_PATH):
csv_pathrb = os.path.join(gfe_path, "b_relative_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathrb)
def load_b_rel_target(gfe_path=GFE_PATH):
csv_targetrb = os.path.join(gfe_path, "b_relative_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetrb)
def load_b_topics_data(gfe_path=GFE_PATH):
csv_pathtb = os.path.join(gfe_path, "b_topics_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathtb)
def load_b_topics_target(gfe_path=GFE_PATH):
csv_targettb = os.path.join(gfe_path, "b_topics_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targettb)
def load_b_wh_data(gfe_path=GFE_PATH):
csv_pathwb = os.path.join(gfe_path, "b_wh_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathwb)
def load_b_wh_target(gfe_path=GFE_PATH):
csv_targetwb = os.path.join(gfe_path, "b_wh_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetwb)
def load_b_yn_data(gfe_path=GFE_PATH):
csv_pathyb = os.path.join(gfe_path, "b_yn_question_datapoints.csv")
print(gfe_path)
return pd.read_csv(csv_pathyb)
def load_b_yn_target(gfe_path=GFE_PATH):
csv_targetyb = os.path.join(gfe_path, "b_yn_question_targets.csv")
print(gfe_path)
return pd.read_csv(csv_targetyb)
# In[4]:
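# Load the datapoints and targets for all nine expression types recorded by user A.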
affirmda = load_a_affirm_data()
affirmta = load_a_affirm_target()
condda = load_a_cond_data()
condta = load_a_cond_target()
doubtqda = load_a_doubtq_data()
doubtqta = load_a_doubtq_target()
emphda = load_a_emphasis_data()
emphta = load_a_emphasis_target()
negda = load_a_neg_data()
negta = load_a_neg_target()
relda = load_a_rel_data()
relta = load_a_rel_target()
topicsda = load_a_topics_data()
topicsta = load_a_topics_target()
whda = load_a_wh_data()
whta = load_a_wh_target()
ynda = load_a_yn_data()
ynta = load_a_yn_target()
# In[5]:
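# ...and the same nine datasets recorded by user B.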
affirmdb = load_b_affirm_data()
affirmtb = load_b_affirm_target()
conddb = load_b_cond_data()
condtb = load_b_cond_target()
doubtqdb = load_b_doubtq_data()
doubtqtb = load_b_doubtq_target()
emphdb = load_b_emphasis_data()
emphtb = load_b_emphasis_target()
negdb = load_b_neg_data()
negtb = load_b_neg_target()
reldb = load_b_rel_data()
reltb = load_b_rel_target()
topicsdb = load_b_topics_data()
topicstb = load_b_topics_target()
whdb = load_b_wh_data()
whtb = load_b_wh_target()
yndb = load_b_yn_data()
yntb = load_b_yn_target()
# In[8]:
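# Pool user A and user B per expression type: stack their datapoints row-wise
# and stack the matching targets in the same order so rows stay aligned.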
users_combine_affirmd = pd.concat([affirmda, affirmdb],ignore_index=True)
affirm_y = pd.concat([affirmta,affirmtb],ignore_index=True)
users_combine_condd = pd.concat([condda, conddb],ignore_index=True)
cond_y = pd.concat([condta, condtb],ignore_index=True)
users_combine_doubtqd = pd.concat([doubtqda, doubtqdb],ignore_index=True)
doubtq_y = pd.concat([doubtqta, doubtqtb],ignore_index=True)
users_combine_emphd = pd.concat([emphda, emphdb],ignore_index=True)
emph_y = pd.concat([emphta, emphtb],ignore_index=True)
users_combine_negd = pd.concat([negda, negdb],ignore_index=True)
neg_y = pd.concat([negta, negtb],ignore_index=True)
users_combine_reld = pd.concat([relda, reldb],ignore_index=True)
rel_y = pd.concat([relta, reltb],ignore_index=True)
users_combine_topicsd = pd.concat([topicsda, topicsdb],ignore_index=True)
topics_y = pd.concat([topicsta, topicstb],ignore_index=True)
users_combine_whd = pd.concat([whda, whdb],ignore_index=True)
wh_y = pd.concat([whta, whtb],ignore_index=True)
users_combine_ynd = pd.concat([ynda, yndb],ignore_index=True)
yn_y = pd.concat([ynta, yntb],ignore_index=True)
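# Each concat stacks user A's recording on top of user B's; ignore_index=True
# renumbers the combined rows 0..n-1 so the two files share one continuous index.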
# In[11]:
users_combine_affirmd['affirm_y'] = affirm_y
# NOTE: drop() returns a new object, so without reassignment or inplace=True
# these drop([10]) calls have no effect; they are kept as in the original source.
affirm_y.drop([10])
# In[12]:
users_combine_condd['cond_y'] = cond_y
cond_y.drop([10])
# In[13]:
users_combine_doubtqd['doubtq_y'] = doubtq_y
doubtq_y.drop([10])
# In[14]:
users_combine_emphd['emph_y'] = emph_y
emph_y.drop([10])
# In[15]:
users_combine_negd['neg_y'] = neg_y
neg_y.drop([10])
# In[16]:
users_combine_reld['rel_y'] = rel_y
rel_y.drop([10])
# In[17]:
users_combine_topicsd['topics_y'] = topics_y
topics_y.drop([10])
# In[18]:
users_combine_whd['wh_y'] = wh_y
wh_y.drop([10])
# In[19]:
users_combine_ynd['yn_y'] = yn_y
yn_y.drop([10])
# In[22]:
from sklearn.model_selection import train_test_split
ya = users_combine_affirmd['affirm_y']
# iloc[:, 1:-1] drops the first (timestamp) column and the target column that
# was appended above; keeping the target inside X would leak the label into
# the features and make the error rates meaningless.
Xa_train, Xa_test, ya_train, ya_test = train_test_split(users_combine_affirmd.iloc[:, 1:-1], ya, stratify=ya)
yc = users_combine_condd['cond_y']
Xc_train, Xc_test, yc_train, yc_test = train_test_split(users_combine_condd.iloc[:, 1:-1], yc, stratify=yc)
yd = users_combine_doubtqd['doubtq_y']
Xd_train, Xd_test, yd_train, yd_test = train_test_split(users_combine_doubtqd.iloc[:, 1:-1], yd, stratify=yd)
ye = users_combine_emphd['emph_y']
Xe_train, Xe_test, ye_train, ye_test = train_test_split(users_combine_emphd.iloc[:, 1:-1], ye, stratify=ye)
yn = users_combine_negd['neg_y']
Xn_train, Xn_test, yn_train, yn_test = train_test_split(users_combine_negd.iloc[:, 1:-1], yn, stratify=yn)
yr = users_combine_reld['rel_y']
Xr_train, Xr_test, yr_train, yr_test = train_test_split(users_combine_reld.iloc[:, 1:-1], yr, stratify=yr)
yt = users_combine_topicsd['topics_y']
Xt_train, Xt_test, yt_train, yt_test = train_test_split(users_combine_topicsd.iloc[:, 1:-1], yt, stratify=yt)
yw = users_combine_whd['wh_y']
Xw_train, Xw_test, yw_train, yw_test = train_test_split(users_combine_whd.iloc[:, 1:-1], yw, stratify=yw)
yy = users_combine_ynd['yn_y']
Xy_train, Xy_test, yy_train, yy_test = train_test_split(users_combine_ynd.iloc[:, 1:-1], yy, stratify=yy)
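# stratify=y preserves each class ratio in both splits; with the default
# test_size, train_test_split holds out 25% of the rows for testing.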
# In[25]:
from sklearn.preprocessing import scale  # not used below
from scipy import stats  # not used below
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda_clf = LDA(solver='lsqr',store_covariance=True)
lda_clf.fit(Xa_train,ya_train)
ya_predicted = lda_clf.predict(Xa_test)
print('\n The error rate of the LDA model for affirm is {0:.2f}% '.format(100*np.mean(ya_predicted!=ya_test)))
lda_clf.fit(Xc_train,yc_train)
yc_predicted = lda_clf.predict(Xc_test)
print('\n The error rate of the LDA model for conditional is {0:.2f}% '.format(100*np.mean(yc_predicted!=yc_test)))
lda_clf.fit(Xd_train,yd_train)
yd_predicted = lda_clf.predict(Xd_test)
print('\n The error rate of the LDA model for doubt questions is {0:.2f}% '.format(100*np.mean(yd_predicted!=yd_test)))
lda_clf.fit(Xe_train,ye_train)
ye_predicted = lda_clf.predict(Xe_test)
print('\n The error rate of the LDA model for emphasis is {0:.2f}% '.format(100*np.mean(ye_predicted!=ye_test)))
lda_clf.fit(Xn_train,yn_train)
yn_predicted = lda_clf.predict(Xn_test)
print('\n The error rate of the LDA model for negative is {0:.2f}% '.format(100*np.mean(yn_predicted!=yn_test)))
lda_clf.fit(Xr_train,yr_train)
yr_predicted = lda_clf.predict(Xr_test)
print('\n The error rate of the LDA model for relative is {0:.2f}% '.format(100*np.mean(yr_predicted!=yr_test)))
lda_clf.fit(Xt_train,yt_train)
yt_predicted = lda_clf.predict(Xt_test)
print('\n The error rate of the LDA model for topics is {0:.2f}% '.format(100*np.mean(yt_predicted!=yt_test)))
lda_clf.fit(Xw_train,yw_train)
yw_predicted = lda_clf.predict(Xw_test)
print('\n The error rate of the LDA model for wh questions is {0:.2f}% '.format(100*np.mean(yw_predicted!=yw_test)))
lda_clf.fit(Xy_train,yy_train)
yy_predicted = lda_clf.predict(Xy_test)
print('\n The error rate of the LDA model for yes/no questions is {0:.2f}% '.format(100*np.mean(yy_predicted!=yy_test)))
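# A short extension (assumes sklearn.metrics, which ships with the scikit-learn
# already used above): the scalar error rate hides how mistakes split across
# classes; a confusion matrix for the affirmative classifier makes that visible
# (rows = true labels, columns = predictions).
from sklearn.metrics import confusion_matrix
print(confusion_matrix(ya_test, ya_predicted))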
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# California Institute of Technology
# (C) 2008 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from vnf.components.ComputationResultRetriever import ComputationResultRetriever as base
class Retriever(base):
def _retrieveResultsFor(self, computation):
director = self.director
db = director.clerk.db
orm = director.clerk.orm
analysisObj = orm.record2object(computation)
# must have a job
self.declareProgress(0.1, 'look up job')
job = computation.getJob(db)
        # check that the expected result file is available
        self.declareProgress(0.2, 'check if results exist')
        self._check_results_sanity(expected_results=[analysisObj._dosFile], job=job)
# create a dos record to save dos
self.declareProgress(0.4, 'create a DOS data object to store the result')
from vnf.dom.material_simulations.PhononDOS import PhononDOSTable
dos = self._make_result_holder(computation, PhononDOSTable)
#dos.matter = computation.matter #analysis calc does not have matter ref!
db.updateRecord(dos)
# save the result from job to dos
#dosObj = orm.record2object(dos)
#server = self.db.dereference(job.server)
#is_available = self.dds.is_available
#dosObj.read(analysisObj.dosFile)
#from idf import DOS
#DOS.write(dosObj.e, dosObj.i, 'data.idf')
self.declareProgress(0.5, 'get result from server')
#self._save_result(computation, job, analysisObj.dosFile, dos, 'data.txt')
self._save_result(computation, job, 'data.idf', dos, 'data.idf')
def retriever():
return Retriever('mddoscalc')
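# Note (assumption): 'mddoscalc' is the computation-type name under which the
# vnf component loader registers this retriever; retriever() is the factory it
# is expected to call.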
# version
__id__ = "$Id$"
# End of file
from plumbum import local, FG, ProcessExecutionError  # FG is not used below
import logging
import os.path
from task import app
kubectl = local["kubectl"]
@app.task
def create_kube_from_template(file_name, *aargs):
    # Merge any number of dict positional arguments into one substitution map.
    args = {}
    for a in aargs:
        args.update(a)
    # Fill the %-style template and pipe it to kubectl on stdin ("-f -").
    template = open(os.path.join('..', file_name)).read() % args
    logging.info((kubectl["create", "-f", "-", "--logtostderr"] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info((kubectl["delete", name])())
return True
except ProcessExecutionError:
return False
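# Hedged usage sketch (assumes `app` in task.py is a Celery application, as the
# @app.task decorators suggest; the template name and args are placeholders):
# create_kube_from_template.delay('redis-pod.yaml', {'name': 'demo'})
# delete_kube_by_name.delay('pod/demo')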
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('regression.csv')
print(data)
x = data.iloc[:, 0]
y = data.iloc[:, 1]
mx = data['X1'].mean()
my = data['Y'].mean()
print(mx, my)
num, den = 0, 0
# Closed-form OLS estimates: beta1 = sum((x-mx)*(y-my)) / sum((x-mx)^2)
for i in range(len(x)):
    num += (x[i] - mx) * (y[i] - my)
    den += (x[i] - mx) ** 2
beta1 = num / den
beta0 = my - (beta1 * mx)
print(beta1, beta0)
Y_predict = beta1 * x + beta0
plt.scatter(x, y)
# Evaluate the fitted line at the two x extremes; pairing min/max of Y_predict
# with min/max of x draws the segment backwards whenever the slope is negative.
x_ends = [min(x), max(x)]
plt.plot(x_ends, [beta1 * xe + beta0 for xe in x_ends], color='red')
plt.show()
ycap = []
# Element-wise recomputation of the fitted values (same numbers as Y_predict).
for i in range(len(x)):
    xdata = (beta1 * x[i]) + beta0
    ycap.append(xdata)
print(ycap)
residue = []
for i in range(len(y)):
    l = y[i] - ycap[i]
    residue.append(l)
print(residue)
residualsum = sum(residue)
# With an intercept, OLS residuals sum to ~0 (up to floating-point error).
print(residualsum)
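# Sanity check (numpy is already imported): np.polyfit solves the same
# least-squares problem, so its coefficients should match beta1 and beta0.
check_beta1, check_beta0 = np.polyfit(x, y, 1)
print(check_beta1, check_beta0)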
# coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import random
csvfilename = 'data/0901/exp1/xiaoxiong.csv'
df = pd.read_csv(csvfilename, header=None,
names=['abstime','posx','posy','posz','roty','rotx','anim'])
# skiprows=1, skipfooter=1)
df.head()
Xr = df['posx'].values
Yr = df['posy'].values
Zr = df['posz'].values
m = len(Xr)
print(m)
deltaTime = 0.0
totalTime = 0.0
P = 1.0*np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
rp = 0.01 # Noise of Position Measurement
R = np.matrix([[rp, 0.0, 0.0],
[0.0, rp, 0.0],
[0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0]])
I = np.eye(9)
sp= 0.01 # Sigma for position noise
Xm = Xr + sp * (np.random.randn(m))
Ym = Yr + sp * (np.random.randn(m))
Zm = Zr + sp * (np.random.randn(m))
measurements = np.vstack((Xm,Ym,Zm))
x = np.matrix([measurements[0][0], measurements[1][0],measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
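# State vector layout: [x, y, z, vx, vy, vz, ax, ay, az]^T -- a constant-
# acceleration motion model in three dimensions.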
# Preallocation for Plotting
xt = []
yt = []
zt = []
mean = [i * 0.01 for i in range(1, 21)]  # candidate mean frame delays: 10 ms to 200 ms
print(mean)
for i in mean:
    random.seed(1)  # same jitter sequence for every mean delay, for comparability
    # Per-frame delays: uniform jitter of +/-5 ms around the mean delay i.
    randomFactor = [random.random() * 0.01 + (i - 0.005) for _ in range(m)]
for idx,step in enumerate(range(m)):
frameBegin = time.time()
time.sleep(randomFactor[idx])
computeBegin = time.time()
        # Update the time-dependent matrices: A and G both depend on the step dt
        dt = i if idx == 0 else deltaTime  # Time Step between Filter Steps
A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
G = np.matrix([[1 / 2.0 * dt ** 2],
[1 / 2.0 * dt ** 2],
[1 / 2.0 * dt ** 2],
[dt],
[dt],
[dt],
[1.0],
[1.0],
[1.0]])
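        # Process-noise covariance of the constant-acceleration model: only
        # acceleration noise (std sa) is assumed, pushed through G into all
        # nine state components.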
Q = G * G.T * sa ** 2
# Time Update (Prediction)
# ========================
# Project the state ahead
x = A*x + B*u
# Project the error covariance ahead
P = A*P*A.T + Q
# Measurement Update (Correction)
# ===============================
# Compute the Kalman Gain
S = H*P*H.T + R
K = (P*H.T) * np.linalg.pinv(S)
# Update the estimate via z
Z = measurements[:,step].reshape(H.shape[0],1)
y = Z - (H*x) # Innovation or Residual
x = x + (K*y)
# Update the error covariance
P = (I - (K*H))*P
# Save states for Plotting
xt.append(float(x[0]))
yt.append(float(x[1]))
zt.append(float(x[2]))
frameEnd = time.time()
deltaTime = frameEnd - frameBegin
totalTime += (frameEnd - computeBegin)
    # RMSE between the ground-truth track and the filtered estimate
    dist = np.sqrt(((Xr-xt)**2 + (Yr-yt)**2 + (Zr-zt)**2).mean())
    print('%.3f,%.8f,%.3f' % (i, totalTime, dist))
    # Reset the filter to its initial configuration before the next delay value
totalTime = 0.0
P = 1.0 * np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
rp = 0.01 # Noise of Position Measurement
R = np.matrix([[rp, 0.0, 0.0],
[0.0, rp, 0.0],
[0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0]])
I = np.eye(9)
sp = 0.01 # Sigma for position noise
Xm = Xr + sp * (np.random.randn(m))
Ym = Yr + sp * (np.random.randn(m))
Zm = Zr + sp * (np.random.randn(m))
measurements = np.vstack((Xm, Ym, Zm))
x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
# Preallocation for Plotting
xt = []
yt = []
zt = []
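# Hedged refactor sketch: the reset above repeats the initialisation verbatim;
# a helper returning fresh filter state would remove the duplication
# (fresh_filter_state is a new name, not part of the original script).
def fresh_filter_state(sp=0.01):
    P0 = 1.0 * np.eye(9)
    Xm0 = Xr + sp * np.random.randn(m)
    Ym0 = Yr + sp * np.random.randn(m)
    Zm0 = Zr + sp * np.random.randn(m)
    meas = np.vstack((Xm0, Ym0, Zm0))
    x0 = np.matrix([meas[0][0], meas[1][0], meas[2][0],
                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
    return P0, meas, x0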
[0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0]])\n I = np.eye(9)\n sp = 0.01 # Sigma for position noise\n Xm = Xr + sp * (np.random.randn(m))\n Ym = Yr + sp * (np.random.randn(m))\n Zm = Zr + sp * (np.random.randn(m))\n measurements = np.vstack((Xm, Ym, Zm))\n x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\n # Preallocation for Plotting\n xt = []\n yt = []\n zt = []\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
from os.path import join, exists
import os
import fitz
from tqdm import tqdm
from pathlib import Path
import tempfile
def download_pdf(url, folder, name):
    # Download the PDF at `url` and save it as <folder>/<name>.pdf.
    r = requests.get(url, allow_redirects=True)
    file_path = join(folder, name + ".pdf")
    with open(file_path, 'wb') as pdf_file:
        pdf_file.write(r.content)
    return file_path


def download_pdf_to_temp(url):
    # Download the PDF into a temporary file; returns the OS-level file
    # descriptor and the temp file's path. The caller must close the
    # descriptor (pdf_2_images does).
    new_file, filename = tempfile.mkstemp()
    r = requests.get(url, allow_redirects=True)
    os.write(new_file, r.content)
    return new_file, filename


def save_pdf_image(file_path, dest_path):
    # Extract the images embedded in the PDF's first page into dest_path,
    # skipping the hard-coded xrefs 10, 25 and 26.
    Path(dest_path).mkdir(parents=True, exist_ok=True)
    doc = fitz.open(file_path)
    i = 1
    images_name = list()
    xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if xref[0] not in [10, 25, 26]])
    # Indices advance by 3, so the largest one is about len(xrefs) * 3;
    # zero-pad the filenames so they sort in extraction order.
    maximum_digits = len(str(len(xrefs)*3))
    for xref in tqdm(xrefs):
        pix = fitz.Pixmap(doc, xref)
        index = f'{i:0{maximum_digits}}'
        img_name = "image--{}.jpg".format(index)
        img_path = join(dest_path, img_name)
        if not exists(img_path):
            # Pixmaps with 5+ components (e.g. CMYK) must be converted to
            # RGB before they can be written out as JPEG.
            if pix.n >= 5:
                pix = fitz.Pixmap(fitz.csRGB, pix)
            pix.writeImage(img_path)
            images_name.append(xref)
        i += 3


def pdf_2_images(url, dest_path):
    # Download the PDF to a temp file, extract its embedded images, then
    # release the temp file descriptor.
    new_file, filename = download_pdf_to_temp(url)
    save_pdf_image(filename, dest_path)
    os.close(new_file)
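

# Example usage (an added sketch; the URL and output folder below are
# placeholder assumptions, not values from the original source):
if __name__ == '__main__':
    pdf_2_images('https://example.com/sample.pdf', 'extracted_images')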
|
normal
|
{
"blob_id": "c6113088f45951bc4c787760b6ca0138265fb83f",
"index": 9966,
"step-1": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\n<mask token>\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-2": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\n<mask token>\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-3": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref\n [0] in [10, 25, 26]])\n maximum_digits = len(str(len(xrefs) * 3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = 'image--{}.jpg'.format(index)\n img_path = join(dest_path, img_name)\n if not exists(img_path):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-4": "import requests\nfrom os.path import join, exists\nimport os\nimport fitz\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport tempfile\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref\n [0] in [10, 25, 26]])\n maximum_digits = len(str(len(xrefs) * 3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = 'image--{}.jpg'.format(index)\n img_path = join(dest_path, img_name)\n if not exists(img_path):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-5": "import requests\nfrom os.path import join, exists\nimport os\nimport fitz\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport tempfile\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + \".pdf\")\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not(xref[0] in [10, 25, 26])])\n maximum_digits = len(str(len(xrefs)*3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = \"image--{}.jpg\".format(index)\n img_path = join(dest_path, img_name)\n if not(exists(img_path)):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import Ploneboard
import PloneboardForum
import PloneboardConversation
import PloneboardComment
|
normal
|
{
"blob_id": "abdf5aee77ee879c50d0e605d5fd95e28a7ef7aa",
"index": 5631,
"step-1": "<mask token>\n",
"step-2": "import Ploneboard\nimport PloneboardForum\nimport PloneboardConversation\nimport PloneboardComment\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class BaseTestCloudAuth:
"""
Required
setup: initialize test case
teardown: del items for test
decode: check decoded token and assigned info
"""
ACCESS_TOKEN = ''
SCOPE_ACCESS_TOKEN = ''
ID_TOKEN = ''
TESTCLIENT = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseTestCloudAuth:
"""
Required
setup: initialize test case
teardown: del items for test
decode: check decoded token and assigned info
"""
ACCESS_TOKEN = ''
SCOPE_ACCESS_TOKEN = ''
ID_TOKEN = ''
TESTCLIENT = None
<|reserved_special_token_0|>
def decode_token(token):
header, payload, *rest = token.split('.')
    header += '=' * (-len(header) % 4)
    payload += '=' * (-len(payload) % 4)
    header = json.loads(base64.urlsafe_b64decode(header).decode())
    payload = json.loads(base64.urlsafe_b64decode(payload).decode())
return header, payload, rest
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseTestCloudAuth:
"""
Required
setup: initialize test case
teardown: del items for test
decode: check decoded token and assigned info
"""
ACCESS_TOKEN = ''
SCOPE_ACCESS_TOKEN = ''
ID_TOKEN = ''
TESTCLIENT = None
def assert_get_response(client, endpoint, token, status_code, detail=''):
if token:
headers = {'authorization': f'Bearer {token}'}
else:
headers = {}
response = client.get(endpoint, headers=headers)
assert response.status_code == status_code, f'{response.json()}'
if detail:
assert response.json().get('detail', '') == detail
return response
def decode_token(token):
header, payload, *rest = token.split('.')
    header += '=' * (-len(header) % 4)
    payload += '=' * (-len(payload) % 4)
    header = json.loads(base64.urlsafe_b64decode(header).decode())
    payload = json.loads(base64.urlsafe_b64decode(payload).decode())
return header, payload, rest
<|reserved_special_token_1|>
import base64
import json
class BaseTestCloudAuth:
"""
Required
setup: initialize test case
teardown: del items for test
decode: check decoded token and assigned info
"""
ACCESS_TOKEN = ''
SCOPE_ACCESS_TOKEN = ''
ID_TOKEN = ''
TESTCLIENT = None
def assert_get_response(client, endpoint, token, status_code, detail=''):
if token:
headers = {'authorization': f'Bearer {token}'}
else:
headers = {}
response = client.get(endpoint, headers=headers)
assert response.status_code == status_code, f'{response.json()}'
if detail:
assert response.json().get('detail', '') == detail
return response
def decode_token(token):
header, payload, *rest = token.split('.')
    header += '=' * (-len(header) % 4)
    payload += '=' * (-len(payload) % 4)
    header = json.loads(base64.urlsafe_b64decode(header).decode())
    payload = json.loads(base64.urlsafe_b64decode(payload).decode())
return header, payload, rest
<|reserved_special_token_1|>
import base64
import json
class BaseTestCloudAuth:
"""
Required
setup: initialize test case
teardown: del items for test
decode: check decoded token and assigned info
"""
ACCESS_TOKEN = ""
SCOPE_ACCESS_TOKEN = ""
ID_TOKEN = ""
TESTCLIENT = None
def assert_get_response(client, endpoint, token, status_code, detail=""):
if token:
headers = {"authorization": f"Bearer {token}"}
else:
headers = {}
response = client.get(endpoint, headers=headers)
assert response.status_code == status_code, f"{response.json()}"
if detail:
assert response.json().get("detail", "") == detail
return response
def decode_token(token):
header, payload, *rest = token.split(".")
    # JWT segments are base64url-encoded and typically unpadded: pad each
    # one to a multiple of 4 and decode with the URL-safe decoder.
    header += "=" * (-len(header) % 4)
    payload += "=" * (-len(payload) % 4)
    header = json.loads(base64.urlsafe_b64decode(header).decode())
    payload = json.loads(base64.urlsafe_b64decode(payload).decode())
return header, payload, rest
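
# Usage sketch (added annotation; the sample token is a hand-built unsigned
# JWT, not a value from the original tests):
#   token = "eyJhbGciOiJub25lIn0.eyJzdWIiOiIxMjMifQ."
#   header, payload, rest = decode_token(token)
#   # header -> {"alg": "none"}, payload -> {"sub": "123"}
#   # The header segment is 19 chars (19 % 4 == 3), so it needs exactly one
#   # "=" of padding -- which "=" * (-len(header) % 4) supplies.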
|
flexible
|
{
"blob_id": "9a2b5b9b2b2f9532b5d0749147aca644c2ac26e3",
"index": 2878,
"step-1": "<mask token>\n\n\nclass BaseTestCloudAuth:\n \"\"\"\n Required\n setup: initialize test case\n teardown: del items for test\n decode: check decoded token and assigned info\n \"\"\"\n ACCESS_TOKEN = ''\n SCOPE_ACCESS_TOKEN = ''\n ID_TOKEN = ''\n TESTCLIENT = None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseTestCloudAuth:\n \"\"\"\n Required\n setup: initialize test case\n teardown: del items for test\n decode: check decoded token and assigned info\n \"\"\"\n ACCESS_TOKEN = ''\n SCOPE_ACCESS_TOKEN = ''\n ID_TOKEN = ''\n TESTCLIENT = None\n\n\n<mask token>\n\n\ndef decode_token(token):\n header, payload, *rest = token.split('.')\n header += f\"{'=' * (len(header) % 4)}\"\n payload += f\"{'=' * (len(payload) % 4)}\"\n header = json.loads(base64.b64decode(header).decode())\n payload = json.loads(base64.b64decode(payload).decode())\n return header, payload, rest\n",
"step-3": "<mask token>\n\n\nclass BaseTestCloudAuth:\n \"\"\"\n Required\n setup: initialize test case\n teardown: del items for test\n decode: check decoded token and assigned info\n \"\"\"\n ACCESS_TOKEN = ''\n SCOPE_ACCESS_TOKEN = ''\n ID_TOKEN = ''\n TESTCLIENT = None\n\n\ndef assert_get_response(client, endpoint, token, status_code, detail=''):\n if token:\n headers = {'authorization': f'Bearer {token}'}\n else:\n headers = {}\n response = client.get(endpoint, headers=headers)\n assert response.status_code == status_code, f'{response.json()}'\n if detail:\n assert response.json().get('detail', '') == detail\n return response\n\n\ndef decode_token(token):\n header, payload, *rest = token.split('.')\n header += f\"{'=' * (len(header) % 4)}\"\n payload += f\"{'=' * (len(payload) % 4)}\"\n header = json.loads(base64.b64decode(header).decode())\n payload = json.loads(base64.b64decode(payload).decode())\n return header, payload, rest\n",
"step-4": "import base64\nimport json\n\n\nclass BaseTestCloudAuth:\n \"\"\"\n Required\n setup: initialize test case\n teardown: del items for test\n decode: check decoded token and assigned info\n \"\"\"\n ACCESS_TOKEN = ''\n SCOPE_ACCESS_TOKEN = ''\n ID_TOKEN = ''\n TESTCLIENT = None\n\n\ndef assert_get_response(client, endpoint, token, status_code, detail=''):\n if token:\n headers = {'authorization': f'Bearer {token}'}\n else:\n headers = {}\n response = client.get(endpoint, headers=headers)\n assert response.status_code == status_code, f'{response.json()}'\n if detail:\n assert response.json().get('detail', '') == detail\n return response\n\n\ndef decode_token(token):\n header, payload, *rest = token.split('.')\n header += f\"{'=' * (len(header) % 4)}\"\n payload += f\"{'=' * (len(payload) % 4)}\"\n header = json.loads(base64.b64decode(header).decode())\n payload = json.loads(base64.b64decode(payload).decode())\n return header, payload, rest\n",
"step-5": "import base64\nimport json\n\n\nclass BaseTestCloudAuth:\n \"\"\"\n Required\n setup: initialize test case\n teardown: del items for test\n decode: check decoded token and assigned info\n \"\"\"\n\n ACCESS_TOKEN = \"\"\n SCOPE_ACCESS_TOKEN = \"\"\n ID_TOKEN = \"\"\n TESTCLIENT = None\n\n\ndef assert_get_response(client, endpoint, token, status_code, detail=\"\"):\n if token:\n headers = {\"authorization\": f\"Bearer {token}\"}\n else:\n headers = {}\n response = client.get(endpoint, headers=headers)\n assert response.status_code == status_code, f\"{response.json()}\"\n if detail:\n assert response.json().get(\"detail\", \"\") == detail\n return response\n\n\ndef decode_token(token):\n header, payload, *rest = token.split(\".\")\n\n header += f\"{'=' * (len(header) % 4)}\"\n payload += f\"{'=' * (len(payload) % 4)}\"\n header = json.loads(base64.b64decode(header).decode())\n payload = json.loads(base64.b64decode(payload).decode())\n return header, payload, rest\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Person:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Person:
<|reserved_special_token_0|>
def GetName(self):
return self.fname + ' ' + self.lname
<|reserved_special_token_1|>
class Person:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
def GetName(self):
return self.fname + ' ' + self.lname
<|reserved_special_token_1|>
class Person:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
def GetName(self):
return (self.fname + ' ' + self.lname)
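
# Usage annotation (an added sketch, not part of the original record):
#   Person('Ada', 'Lovelace').GetName()  # -> 'Ada Lovelace'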
|
flexible
|
{
"blob_id": "ff358136bc96fa7f3eb41d019ddfd10fc4db8f0d",
"index": 5558,
"step-1": "<mask token>\n",
"step-2": "class Person:\n <mask token>\n <mask token>\n",
"step-3": "class Person:\n <mask token>\n\n def GetName(self):\n return self.fname + ' ' + self.lname\n",
"step-4": "class Person:\n\n def __init__(self, fname, lname):\n self.fname = fname\n self.lname = lname\n\n def GetName(self):\n return self.fname + ' ' + self.lname\n",
"step-5": "class Person:\n def __init__(self, fname, lname):\n self.fname = fname\n self.lname = lname\n\n def GetName(self):\n return (self.fname + ' ' + self.lname)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# ================================================== #
# MAIN WINDOW #
# ================================================== #
# Author: Brady Hammond #
# Created: 11/21/2017 #
# Last Edited: N/A #
# Last Edited By: N/A #
# ================================================== #
# FILE SETUP #
# ================================================== #
# Import statements
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QMessageBox
from src import FileDialog, SentimentAnalyzer
# ================================================== #
# CLASS DEFINITION #
# ================================================== #
# UIMainWindow class definition
class UIMainWindow(object):
# Define __init__ function
def __init__(self):
# Create main window
font = QtGui.QFont()
font.setFamily("Myriad Pro")
font.setPointSize(14)
self.main_window = QtWidgets.QWidget()
self.main_window.setFont(font)
self.main_window.setObjectName("main_window")
self.main_window.setWindowModality(QtCore.Qt.WindowModal)
self.main_window.resize(450, 460)
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(self.main_window.sizePolicy().hasHeightForWidth())
self.main_window.setSizePolicy(size_policy)
self.main_window.setMinimumSize(QtCore.QSize(450, 460))
self.main_window.setMaximumSize(QtCore.QSize(450, 460))
self.main_window.setBaseSize(QtCore.QSize(450, 460))
# Create branding icon
self.branding_icon = QtWidgets.QLabel(self.main_window)
self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))
self.branding_icon.setText("")
self.branding_icon.setPixmap(QtGui.QPixmap("../images/senticompare_logo.png"))
self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)
self.branding_icon.setObjectName("branding_icon")
# Create branding label
self.branding_label = QtWidgets.QLabel(self.main_window)
self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.branding_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Optima")
font.setPointSize(50)
self.branding_label.setFont(font)
self.branding_label.setObjectName("branding_label")
# Create first horizontal layout
self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)
self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, 430, 50))
self.horizontal_layout_widget_1.setObjectName("horizontal_layout_widget_1")
self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_1)
self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_1.setObjectName("horizontal_layout_1")
# Create run button
self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)
self.run_button.setObjectName("run_button")
self.run_button.clicked.connect(self.run)
# Add run button to first horizontal layout
self.horizontal_layout_1.addWidget(self.run_button)
# Create quit button
self.quit_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)
self.quit_button.setObjectName("quit_button")
self.quit_button.clicked.connect(self.main_window.close)
# Add quit button to first horizontal layout
self.horizontal_layout_1.addWidget(self.quit_button)
# Create file selection tab
self.select_files_tab = QtWidgets.QWidget()
self.select_files_tab.setObjectName("select_files_tab")
# Create second horizontal layout
self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.select_files_tab)
self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, 230, 50))
self.horizontal_layout_widget_2.setObjectName("horizontal_layout_widget_2")
self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_2)
self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_2.setObjectName("horizontal_layout_2")
# Create input/output tab window
font.setFamily("Myriad Pro")
font.setPointSize(12)
self.input_output_box = QtWidgets.QTabWidget(self.main_window)
self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))
self.input_output_box.setFont(font)
self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)
self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)
self.input_output_box.setTabsClosable(False)
self.input_output_box.setObjectName("input_output_box")
# Create file view
self.file_view = QtWidgets.QListView(self.select_files_tab)
self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))
self.file_view.setObjectName("file_view")
# Create file view model
self.file_view_model = QStandardItemModel(self.file_view)
# Add file view model to file view
self.file_view.setModel(self.file_view_model)
# Show file view
self.file_view.show()
# Add file selection tab to input/output tab window
self.input_output_box.addTab(self.select_files_tab, "")
# Create add button
self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)
self.add_button.setFont(font)
self.add_button.setObjectName("add_button")
self.add_button.clicked.connect(self.selectFiles)
# Add add button to second horizontal layout
self.horizontal_layout_2.addWidget(self.add_button)
# Create delete button
self.delete_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)
self.delete_button.setFont(font)
self.delete_button.setObjectName("delete_button")
self.delete_button.clicked.connect(self.removeFiles)
# Add delete button to second horizontal layout
self.horizontal_layout_2.addWidget(self.delete_button)
# Create manual input tab
self.manual_input_tab = QtWidgets.QWidget()
self.manual_input_tab.setObjectName("manual_input_tab")
# Create text input
self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)
self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.text_input.setObjectName("text_input")
# Add text input to manual input tab
self.input_output_box.addTab(self.manual_input_tab, "")
# Create results tab
self.results_tab = QtWidgets.QWidget()
self.results_tab.setObjectName("results_tab")
# Create results scroll box
self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)
self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.results_scroll_box.setWidgetResizable(True)
self.results_scroll_box.setObjectName("results_scroll_box")
# Create results content
self.results_content = QtWidgets.QWidget()
self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))
self.results_content.setObjectName("results_content")
self.results_scroll_box.setWidget(self.results_content)
# Create results content text
self.results_content_text = QtWidgets.QTextEdit(self.results_content)
self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))
self.results_content_text.setReadOnly(True)
self.results_content_text.setObjectName("results_content_text")
# Add results tab to input/output tab window
self.input_output_box.addTab(self.results_tab, "")
# Disable results tab
self.input_output_box.setTabEnabled(2, False)
# Create first group box
font.setPointSize(14)
self.group_box_1 = QtWidgets.QGroupBox(self.main_window)
self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))
self.group_box_1.setFont(font)
self.group_box_1.setTitle("")
self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)
self.group_box_1.setFlat(False)
self.group_box_1.setCheckable(False)
self.group_box_1.setObjectName("group_box_1")
# Create first vertical layout
self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)
self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))
self.vertical_layout_widget_1.setObjectName("vertical_layout_widget_1")
self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.vertical_layout_widget_1)
self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)
self.vertical_layout_1.setObjectName("vertical_layout_1")
# Create pronoun checkbox
self.pronoun_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.pronoun_checkbox.setFont(font)
self.pronoun_checkbox.setObjectName("pronoun_checkbox")
# Add pronoun checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.pronoun_checkbox)
# Create lexical checkbox
self.lexical_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.lexical_checkbox.setFont(font)
self.lexical_checkbox.setObjectName("lexical_checkbox")
# Add lexical checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.lexical_checkbox)
# Create rule based checkbox
self.rule_based_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.rule_based_checkbox.setFont(font)
self.rule_based_checkbox.setObjectName("rule_based_checkbox")
# Add rule_based checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.rule_based_checkbox)
# Create machine learning checkbox
self.machine_learning_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
self.machine_learning_checkbox.setFont(font)
self.machine_learning_checkbox.setObjectName("machine_learning_checkbox")
# Add machine learning checkbox to first vertical layout
self.vertical_layout_1.addWidget(self.machine_learning_checkbox)
# Create help scroll box
self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)
self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))
self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)
self.help_scroll_box.setWidgetResizable(True)
self.help_scroll_box.setObjectName("help_scroll_box")
# Create help content
self.help_content = QtWidgets.QWidget()
self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))
self.help_content.setObjectName("help_content")
self.help_scroll_box.setWidget(self.help_content)
# Create selected files variable
self.selected_files = {}
# Set current tab
self.input_output_box.setCurrentIndex(0)
# Retranslate UI
self.retranslateUI()
# Connect UI slots
QtCore.QMetaObject.connectSlotsByName(self.main_window)
# ============================================== #
# Define retranslateUI function
def retranslateUI(self):
# Add text to ui elements
_translate = QtCore.QCoreApplication.translate
self.main_window.setWindowTitle(_translate("main_window", "SentiCompare"))
self.add_button.setText(_translate("main_window", "Add"))
self.delete_button.setText(_translate("main_window", "Delete"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.select_files_tab),
_translate("main_window", "Select Files"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.manual_input_tab),
_translate("main_window", "Manual Input"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.results_tab),
_translate("main_window", "Results"))
self.run_button.setText(_translate("main_window", "Run"))
self.quit_button.setText(_translate("main_window", "Quit"))
self.pronoun_checkbox.setText(_translate("main_window", "Pronoun Usage"))
self.lexical_checkbox.setText(_translate("main_window", "Lexical"))
self.rule_based_checkbox.setText(_translate("main_window", "Rule Based"))
self.machine_learning_checkbox.setText(_translate("main_window", "Machine Learning"))
self.branding_label.setText(_translate("main_window", "SentiCompare"))
# ============================================== #
# Define showWindow function
def showWindow(self):
self.main_window.show()
# ============================================== #
# Define selectFiles function
def selectFiles(self):
# Create file dialog
file_dialog = FileDialog(self.main_window)
file_dialog.setFilters(["Text files (*.txt)"])
file_dialog.setDefaultFilterIndex = 0
file_dialog.setDefaultDirectory(os.path.expanduser('~'))
file_dialog.exec()
# Return if nothing was selected
if file_dialog.getPath() == '':
return
# Add files from selected directory to file list
elif file_dialog.getFilename()[2] == '':
for file in os.listdir(file_dialog.getPath()):
if file.endswith('.txt') and not file.startswith('.'):
file_path = os.path.join(file_dialog.getPath(), file)
if file_path not in self.selected_files:
self.selected_files[file] = file_path
item = QStandardItem(file)
item.setCheckable(True)
self.file_view_model.appendRow(item)
# Add selected file to list
else:
if file_dialog.getPath() not in self.selected_files:
self.selected_files[file_dialog.getFilename()[1]] = file_dialog.getPath()
item = QStandardItem(file_dialog.getFilename()[1])
item.setCheckable(True)
self.file_view_model.appendRow(item)
# ============================================== #
# Define removeFiles function
def removeFiles(self):
# Remove all checked files
for i in range(self.file_view_model.rowCount() - 1, -1, -1):
if self.file_view_model.item(i).checkState():
filename = self.file_view_model.item(i).text()
del self.selected_files[filename]
self.file_view_model.removeRow(i)
# ============================================== #
# Define run function
def run(self):
# Check if an analysis method is selected
if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.isChecked() or
self.rule_based_checkbox.isChecked() or self.machine_learning_checkbox.isChecked()):
# Create and show an error message
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle("Missing Parameters")
message_box.setText("You haven't selected any methods of sentiment analysis. Please select at least one " +
"method from the list of options.")
message_box.exec_()
return
# Check if the current tab is valid
if self.input_output_box.currentIndex() == 2:
# Create and show error message
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle("Select Input")
message_box.setText("You must be on the \"Select Files\" page or the \"Manual Input\" page to run " +
"an analysis. Please select one of those pages and try again.")
message_box.exec_()
return
else:
progress_bar = QtWidgets.QProgressDialog("Running Sentiment Analysis...", "Cancel", 0, 100, self.main_window)
progress_bar.setValue(0)
progress_bar.setCancelButton(None)
progress_bar.setWindowModality(QtCore.Qt.WindowModal)
progress_bar.resize(400, 50)
progress_bar.show()
# Analyze selected files
if self.input_output_box.currentIndex() == 0:
sentiment_analyzer = SentimentAnalyzer(self.selected_files, progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
lexical=self.lexical_checkbox.isChecked(),
rule_based=self.rule_based_checkbox.isChecked(),
machine_learning=self.machine_learning_checkbox.isChecked())
# Analyze manual input
else:
sentiment_analyzer = SentimentAnalyzer(self.text_input.toPlainText(), progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
lexical=self.lexical_checkbox.isChecked(),
rule_based=self.rule_based_checkbox.isChecked(),
machine_learning=self.machine_learning_checkbox.isChecked())
results = sentiment_analyzer.runAnalyses()
progress_bar.close()
if results:
self.results_content_text.setText(results)
self.input_output_box.setTabEnabled(2, True)
self.input_output_box.setCurrentIndex(2)
else:
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle("Missing Input")
message_box.setText("You haven't added any input to analyze. Please select one or more files or " +
"input some data manually.")
message_box.exec_()
return
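
# ================================================== #
#                   EXAMPLE USAGE                    #
# ================================================== #

# An added sketch (not part of the original file): the window needs a
# running QApplication event loop to appear on screen.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = UIMainWindow()
    window.showWindow()
    sys.exit(app.exec_())
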
# ================================================== #
# EOF #
# ================================================== #
|
normal
|
{
"blob_id": "a555226b14223dca688d10b811eb36fb229360ce",
"index": 2457,
"step-1": "<mask token>\n\n\nclass UIMainWindow(object):\n <mask token>\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass UIMainWindow(object):\n\n def __init__(self):\n font = QtGui.QFont()\n font.setFamily('Myriad Pro')\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName('main_window')\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().\n hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText('')\n self.branding_icon.setPixmap(QtGui.QPixmap(\n '../images/senticompare_logo.png'))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.\n AlignVCenter)\n self.branding_icon.setObjectName('branding_icon')\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily('Optima')\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName('branding_label')\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, \n 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\n 'horizontal_layout_widget_1')\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName('horizontal_layout_1')\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1\n )\n self.run_button.setObjectName('run_button')\n self.run_button.clicked.connect(self.run)\n self.horizontal_layout_1.addWidget(self.run_button)\n self.quit_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_1)\n self.quit_button.setObjectName('quit_button')\n self.quit_button.clicked.connect(self.main_window.close)\n self.horizontal_layout_1.addWidget(self.quit_button)\n self.select_files_tab = QtWidgets.QWidget()\n 
self.select_files_tab.setObjectName('select_files_tab')\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.\n select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, \n 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\n 'horizontal_layout_widget_2')\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName('horizontal_layout_2')\n font.setFamily('Myriad Pro')\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.\n PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName('input_output_box')\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName('file_view')\n self.file_view_model = QStandardItemModel(self.file_view)\n self.file_view.setModel(self.file_view_model)\n self.file_view.show()\n self.input_output_box.addTab(self.select_files_tab, '')\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2\n )\n self.add_button.setFont(font)\n self.add_button.setObjectName('add_button')\n self.add_button.clicked.connect(self.selectFiles)\n self.horizontal_layout_2.addWidget(self.add_button)\n self.delete_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName('delete_button')\n self.delete_button.clicked.connect(self.removeFiles)\n self.horizontal_layout_2.addWidget(self.delete_button)\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName('manual_input_tab')\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName('text_input')\n self.input_output_box.addTab(self.manual_input_tab, '')\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName('results_tab')\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName('results_scroll_box')\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName('results_content')\n self.results_scroll_box.setWidget(self.results_content)\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName('results_content_text')\n self.input_output_box.addTab(self.results_tab, '')\n self.input_output_box.setTabEnabled(2, False)\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle('')\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n 
self.group_box_1.setObjectName('group_box_1')\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.\n vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName('vertical_layout_1')\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName('pronoun_checkbox')\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n self.lexical_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName('lexical_checkbox')\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName('rule_based_checkbox')\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\n 'machine_learning_checkbox')\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName('help_scroll_box')\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName('help_content')\n self.help_scroll_box.setWidget(self.help_content)\n self.selected_files = {}\n self.input_output_box.setCurrentIndex(0)\n self.retranslateUI()\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n 
file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n <mask token>\n\n def run(self):\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.\n isChecked() or self.rule_based_checkbox.isChecked() or self.\n machine_learning_checkbox.isChecked()):\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Parameters')\n message_box.setText(\n \"You haven't selected any methods of sentiment analysis. Please select at least one \"\n + 'method from the list of options.')\n message_box.exec_()\n return\n if self.input_output_box.currentIndex() == 2:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Select Input')\n message_box.setText(\n 'You must be on the \"Select Files\" page or the \"Manual Input\" page to run '\n +\n 'an analysis. Please select one of those pages and try again.')\n message_box.exec_()\n return\n else:\n progress_bar = QtWidgets.QProgressDialog(\n 'Running Sentiment Analysis...', 'Cancel', 0, 100, self.\n main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files,\n progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(), rule_based=\n self.rule_based_checkbox.isChecked(), machine_learning=\n self.machine_learning_checkbox.isChecked())\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.\n toPlainText(), progress_bar, pronoun=self.\n pronoun_checkbox.isChecked(), lexical=self.\n lexical_checkbox.isChecked(), rule_based=self.\n rule_based_checkbox.isChecked(), machine_learning=self.\n machine_learning_checkbox.isChecked())\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Input')\n message_box.setText(\n \"You haven't added any input to analyze. Please select one or more files or \"\n + 'input some data manually.')\n message_box.exec_()\n return\n",
"step-3": "<mask token>\n\n\nclass UIMainWindow(object):\n\n def __init__(self):\n font = QtGui.QFont()\n font.setFamily('Myriad Pro')\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName('main_window')\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().\n hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText('')\n self.branding_icon.setPixmap(QtGui.QPixmap(\n '../images/senticompare_logo.png'))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.\n AlignVCenter)\n self.branding_icon.setObjectName('branding_icon')\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily('Optima')\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName('branding_label')\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, \n 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\n 'horizontal_layout_widget_1')\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName('horizontal_layout_1')\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1\n )\n self.run_button.setObjectName('run_button')\n self.run_button.clicked.connect(self.run)\n self.horizontal_layout_1.addWidget(self.run_button)\n self.quit_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_1)\n self.quit_button.setObjectName('quit_button')\n self.quit_button.clicked.connect(self.main_window.close)\n self.horizontal_layout_1.addWidget(self.quit_button)\n self.select_files_tab = QtWidgets.QWidget()\n 
self.select_files_tab.setObjectName('select_files_tab')\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.\n select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, \n 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\n 'horizontal_layout_widget_2')\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName('horizontal_layout_2')\n font.setFamily('Myriad Pro')\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.\n PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName('input_output_box')\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName('file_view')\n self.file_view_model = QStandardItemModel(self.file_view)\n self.file_view.setModel(self.file_view_model)\n self.file_view.show()\n self.input_output_box.addTab(self.select_files_tab, '')\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2\n )\n self.add_button.setFont(font)\n self.add_button.setObjectName('add_button')\n self.add_button.clicked.connect(self.selectFiles)\n self.horizontal_layout_2.addWidget(self.add_button)\n self.delete_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName('delete_button')\n self.delete_button.clicked.connect(self.removeFiles)\n self.horizontal_layout_2.addWidget(self.delete_button)\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName('manual_input_tab')\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName('text_input')\n self.input_output_box.addTab(self.manual_input_tab, '')\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName('results_tab')\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName('results_scroll_box')\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName('results_content')\n self.results_scroll_box.setWidget(self.results_content)\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName('results_content_text')\n self.input_output_box.addTab(self.results_tab, '')\n self.input_output_box.setTabEnabled(2, False)\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle('')\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n 
self.group_box_1.setObjectName('group_box_1')\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.\n vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName('vertical_layout_1')\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName('pronoun_checkbox')\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n self.lexical_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName('lexical_checkbox')\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName('rule_based_checkbox')\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\n 'machine_learning_checkbox')\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName('help_scroll_box')\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName('help_content')\n self.help_scroll_box.setWidget(self.help_content)\n self.selected_files = {}\n self.input_output_box.setCurrentIndex(0)\n self.retranslateUI()\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n 
file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n def removeFiles(self):\n for i in range(self.file_view_model.rowCount() - 1, -1, -1):\n if self.file_view_model.item(i).checkState():\n filename = self.file_view_model.item(i).text()\n del self.selected_files[filename]\n self.file_view_model.removeRow(i)\n\n def run(self):\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.\n isChecked() or self.rule_based_checkbox.isChecked() or self.\n machine_learning_checkbox.isChecked()):\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Parameters')\n message_box.setText(\n \"You haven't selected any methods of sentiment analysis. Please select at least one \"\n + 'method from the list of options.')\n message_box.exec_()\n return\n if self.input_output_box.currentIndex() == 2:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Select Input')\n message_box.setText(\n 'You must be on the \"Select Files\" page or the \"Manual Input\" page to run '\n +\n 'an analysis. Please select one of those pages and try again.')\n message_box.exec_()\n return\n else:\n progress_bar = QtWidgets.QProgressDialog(\n 'Running Sentiment Analysis...', 'Cancel', 0, 100, self.\n main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files,\n progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(), rule_based=\n self.rule_based_checkbox.isChecked(), machine_learning=\n self.machine_learning_checkbox.isChecked())\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.\n toPlainText(), progress_bar, pronoun=self.\n pronoun_checkbox.isChecked(), lexical=self.\n lexical_checkbox.isChecked(), rule_based=self.\n rule_based_checkbox.isChecked(), machine_learning=self.\n machine_learning_checkbox.isChecked())\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Input')\n message_box.setText(\n \"You haven't added any input to analyze. Please select one or more files or \"\n + 'input some data manually.')\n message_box.exec_()\n return\n",
"step-4": "import os\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\nfrom PyQt5.QtWidgets import QMessageBox\nfrom src import FileDialog, SentimentAnalyzer\n\n\nclass UIMainWindow(object):\n\n def __init__(self):\n font = QtGui.QFont()\n font.setFamily('Myriad Pro')\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName('main_window')\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().\n hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText('')\n self.branding_icon.setPixmap(QtGui.QPixmap(\n '../images/senticompare_logo.png'))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.\n AlignVCenter)\n self.branding_icon.setObjectName('branding_icon')\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily('Optima')\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName('branding_label')\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, \n 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\n 'horizontal_layout_widget_1')\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName('horizontal_layout_1')\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1\n )\n self.run_button.setObjectName('run_button')\n self.run_button.clicked.connect(self.run)\n self.horizontal_layout_1.addWidget(self.run_button)\n self.quit_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_1)\n 
self.quit_button.setObjectName('quit_button')\n self.quit_button.clicked.connect(self.main_window.close)\n self.horizontal_layout_1.addWidget(self.quit_button)\n self.select_files_tab = QtWidgets.QWidget()\n self.select_files_tab.setObjectName('select_files_tab')\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.\n select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, \n 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\n 'horizontal_layout_widget_2')\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.\n horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName('horizontal_layout_2')\n font.setFamily('Myriad Pro')\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.\n PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName('input_output_box')\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName('file_view')\n self.file_view_model = QStandardItemModel(self.file_view)\n self.file_view.setModel(self.file_view_model)\n self.file_view.show()\n self.input_output_box.addTab(self.select_files_tab, '')\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2\n )\n self.add_button.setFont(font)\n self.add_button.setObjectName('add_button')\n self.add_button.clicked.connect(self.selectFiles)\n self.horizontal_layout_2.addWidget(self.add_button)\n self.delete_button = QtWidgets.QPushButton(self.\n horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName('delete_button')\n self.delete_button.clicked.connect(self.removeFiles)\n self.horizontal_layout_2.addWidget(self.delete_button)\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName('manual_input_tab')\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName('text_input')\n self.input_output_box.addTab(self.manual_input_tab, '')\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName('results_tab')\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName('results_scroll_box')\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName('results_content')\n self.results_scroll_box.setWidget(self.results_content)\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName('results_content_text')\n self.input_output_box.addTab(self.results_tab, '')\n self.input_output_box.setTabEnabled(2, False)\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 
160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle('')\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n self.group_box_1.setObjectName('group_box_1')\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.\n vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName('vertical_layout_1')\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName('pronoun_checkbox')\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n self.lexical_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName('lexical_checkbox')\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName('rule_based_checkbox')\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.\n vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\n 'machine_learning_checkbox')\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName('help_scroll_box')\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName('help_content')\n self.help_scroll_box.setWidget(self.help_content)\n self.selected_files = {}\n self.input_output_box.setCurrentIndex(0)\n self.retranslateUI()\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n def retranslateUI(self):\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate('main_window',\n 'SentiCompare'))\n self.add_button.setText(_translate('main_window', 'Add'))\n self.delete_button.setText(_translate('main_window', 'Delete'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .select_files_tab), _translate('main_window', 'Select Files'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .manual_input_tab), _translate('main_window', 'Manual Input'))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self\n .results_tab), _translate('main_window', 'Results'))\n self.run_button.setText(_translate('main_window', 'Run'))\n self.quit_button.setText(_translate('main_window', 'Quit'))\n self.pronoun_checkbox.setText(_translate('main_window',\n 'Pronoun Usage'))\n self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))\n self.rule_based_checkbox.setText(_translate('main_window',\n 'Rule Based'))\n self.machine_learning_checkbox.setText(_translate('main_window',\n 'Machine Learning'))\n 
self.branding_label.setText(_translate('main_window', 'SentiCompare'))\n\n def showWindow(self):\n self.main_window.show()\n\n def selectFiles(self):\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters(['Text files (*.txt)'])\n file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n if file_dialog.getPath() == '':\n return\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n elif file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]\n ] = file_dialog.getPath()\n item = QStandardItem(file_dialog.getFilename()[1])\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n def removeFiles(self):\n for i in range(self.file_view_model.rowCount() - 1, -1, -1):\n if self.file_view_model.item(i).checkState():\n filename = self.file_view_model.item(i).text()\n del self.selected_files[filename]\n self.file_view_model.removeRow(i)\n\n def run(self):\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.\n isChecked() or self.rule_based_checkbox.isChecked() or self.\n machine_learning_checkbox.isChecked()):\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Parameters')\n message_box.setText(\n \"You haven't selected any methods of sentiment analysis. Please select at least one \"\n + 'method from the list of options.')\n message_box.exec_()\n return\n if self.input_output_box.currentIndex() == 2:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Select Input')\n message_box.setText(\n 'You must be on the \"Select Files\" page or the \"Manual Input\" page to run '\n +\n 'an analysis. Please select one of those pages and try again.')\n message_box.exec_()\n return\n else:\n progress_bar = QtWidgets.QProgressDialog(\n 'Running Sentiment Analysis...', 'Cancel', 0, 100, self.\n main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files,\n progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(), rule_based=\n self.rule_based_checkbox.isChecked(), machine_learning=\n self.machine_learning_checkbox.isChecked())\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.\n toPlainText(), progress_bar, pronoun=self.\n pronoun_checkbox.isChecked(), lexical=self.\n lexical_checkbox.isChecked(), rule_based=self.\n rule_based_checkbox.isChecked(), machine_learning=self.\n machine_learning_checkbox.isChecked())\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle('Missing Input')\n message_box.setText(\n \"You haven't added any input to analyze. 
Please select one or more files or \"\n + 'input some data manually.')\n message_box.exec_()\n return\n",
"step-5": "# ================================================== #\n# MAIN WINDOW #\n# ================================================== #\n# Author: Brady Hammond #\n# Created: 11/21/2017 #\n# Last Edited: N/A #\n# Last Edited By: N/A #\n# ================================================== #\n# FILE SETUP #\n# ================================================== #\n\n\n# Import statements\nimport os\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\nfrom PyQt5.QtWidgets import QMessageBox\nfrom src import FileDialog, SentimentAnalyzer\n\n\n# ================================================== #\n# CLASS DEFINITION #\n# ================================================== #\n\n\n# UIMainWindow class definition\nclass UIMainWindow(object):\n\n # Define __init__ function\n def __init__(self):\n # Create main window\n font = QtGui.QFont()\n font.setFamily(\"Myriad Pro\")\n font.setPointSize(14)\n self.main_window = QtWidgets.QWidget()\n self.main_window.setFont(font)\n self.main_window.setObjectName(\"main_window\")\n self.main_window.setWindowModality(QtCore.Qt.WindowModal)\n self.main_window.resize(450, 460)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.main_window.sizePolicy().hasHeightForWidth())\n self.main_window.setSizePolicy(size_policy)\n self.main_window.setMinimumSize(QtCore.QSize(450, 460))\n self.main_window.setMaximumSize(QtCore.QSize(450, 460))\n self.main_window.setBaseSize(QtCore.QSize(450, 460))\n\n # Create branding icon\n self.branding_icon = QtWidgets.QLabel(self.main_window)\n self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))\n self.branding_icon.setText(\"\")\n self.branding_icon.setPixmap(QtGui.QPixmap(\"../images/senticompare_logo.png\"))\n self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)\n self.branding_icon.setObjectName(\"branding_icon\")\n\n # Create branding label\n self.branding_label = QtWidgets.QLabel(self.main_window)\n self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n self.branding_label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(\"Optima\")\n font.setPointSize(50)\n self.branding_label.setFont(font)\n self.branding_label.setObjectName(\"branding_label\")\n\n # Create first horizontal layout\n self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)\n 
self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, 430, 50))\n self.horizontal_layout_widget_1.setObjectName(\"horizontal_layout_widget_1\")\n self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_1)\n self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_1.setObjectName(\"horizontal_layout_1\")\n\n # Create run button\n self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)\n self.run_button.setObjectName(\"run_button\")\n self.run_button.clicked.connect(self.run)\n\n # Add run button to first horizontal layout\n self.horizontal_layout_1.addWidget(self.run_button)\n\n # Create quit button\n self.quit_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)\n self.quit_button.setObjectName(\"quit_button\")\n self.quit_button.clicked.connect(self.main_window.close)\n\n # Add quit button to first horizontal layout\n self.horizontal_layout_1.addWidget(self.quit_button)\n\n # Create file selection tab\n self.select_files_tab = QtWidgets.QWidget()\n self.select_files_tab.setObjectName(\"select_files_tab\")\n\n # Create second horizontal layout\n self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.select_files_tab)\n self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, 230, 50))\n self.horizontal_layout_widget_2.setObjectName(\"horizontal_layout_widget_2\")\n self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_2)\n self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontal_layout_2.setObjectName(\"horizontal_layout_2\")\n\n # Create input/output tab window\n font.setFamily(\"Myriad Pro\")\n font.setPointSize(12)\n self.input_output_box = QtWidgets.QTabWidget(self.main_window)\n self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))\n self.input_output_box.setFont(font)\n self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)\n self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.input_output_box.setTabsClosable(False)\n self.input_output_box.setObjectName(\"input_output_box\")\n\n # Create file view\n self.file_view = QtWidgets.QListView(self.select_files_tab)\n self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))\n self.file_view.setObjectName(\"file_view\")\n\n # Create file view model\n self.file_view_model = QStandardItemModel(self.file_view)\n\n # Add file view model to file view\n self.file_view.setModel(self.file_view_model)\n\n # Show file view\n self.file_view.show()\n\n # Add file selection tab to input/output tab window\n self.input_output_box.addTab(self.select_files_tab, \"\")\n\n # Create add button\n self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)\n self.add_button.setFont(font)\n self.add_button.setObjectName(\"add_button\")\n self.add_button.clicked.connect(self.selectFiles)\n\n # Add add button to second horizontal layout\n self.horizontal_layout_2.addWidget(self.add_button)\n\n # Create delete button\n self.delete_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)\n self.delete_button.setFont(font)\n self.delete_button.setObjectName(\"delete_button\")\n self.delete_button.clicked.connect(self.removeFiles)\n\n # Add delete button to second horizontal layout\n self.horizontal_layout_2.addWidget(self.delete_button)\n\n # Create manual input tab\n self.manual_input_tab = QtWidgets.QWidget()\n self.manual_input_tab.setObjectName(\"manual_input_tab\")\n\n # Create text 
input\n self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)\n self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.text_input.setObjectName(\"text_input\")\n\n # Add text input to manual input tab\n self.input_output_box.addTab(self.manual_input_tab, \"\")\n\n # Create results tab\n self.results_tab = QtWidgets.QWidget()\n self.results_tab.setObjectName(\"results_tab\")\n\n # Create results scroll box\n self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)\n self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))\n self.results_scroll_box.setWidgetResizable(True)\n self.results_scroll_box.setObjectName(\"results_scroll_box\")\n\n # Create results content\n self.results_content = QtWidgets.QWidget()\n self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))\n self.results_content.setObjectName(\"results_content\")\n self.results_scroll_box.setWidget(self.results_content)\n\n # Create results content text\n self.results_content_text = QtWidgets.QTextEdit(self.results_content)\n self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))\n self.results_content_text.setReadOnly(True)\n self.results_content_text.setObjectName(\"results_content_text\")\n\n # Add results tab to input/output tab window\n self.input_output_box.addTab(self.results_tab, \"\")\n\n # Disable results tab\n self.input_output_box.setTabEnabled(2, False)\n\n # Create first group box\n font.setPointSize(14)\n self.group_box_1 = QtWidgets.QGroupBox(self.main_window)\n self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))\n self.group_box_1.setFont(font)\n self.group_box_1.setTitle(\"\")\n self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)\n self.group_box_1.setFlat(False)\n self.group_box_1.setCheckable(False)\n self.group_box_1.setObjectName(\"group_box_1\")\n\n # Create first vertical layout\n self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)\n self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))\n self.vertical_layout_widget_1.setObjectName(\"vertical_layout_widget_1\")\n self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.vertical_layout_widget_1)\n self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)\n self.vertical_layout_1.setObjectName(\"vertical_layout_1\")\n\n # Create pronoun checkbox\n self.pronoun_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.pronoun_checkbox.setFont(font)\n self.pronoun_checkbox.setObjectName(\"pronoun_checkbox\")\n\n # Add pronoun checkbox to first vertical layout\n self.vertical_layout_1.addWidget(self.pronoun_checkbox)\n\n # Create lexical checkbox\n self.lexical_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.lexical_checkbox.setFont(font)\n self.lexical_checkbox.setObjectName(\"lexical_checkbox\")\n\n # Add lexical checkbox to first vertical layout\n self.vertical_layout_1.addWidget(self.lexical_checkbox)\n\n # Create rule based checkbox\n self.rule_based_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.rule_based_checkbox.setFont(font)\n self.rule_based_checkbox.setObjectName(\"rule_based_checkbox\")\n\n # Add rule_based checkbox to first vertical layout\n self.vertical_layout_1.addWidget(self.rule_based_checkbox)\n\n # Create machine learning checkbox\n self.machine_learning_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)\n self.machine_learning_checkbox.setFont(font)\n self.machine_learning_checkbox.setObjectName(\"machine_learning_checkbox\")\n\n # Add machine learning checkbox to first vertical 
layout\n self.vertical_layout_1.addWidget(self.machine_learning_checkbox)\n\n # Create help scroll box\n self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)\n self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))\n self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.help_scroll_box.setWidgetResizable(True)\n self.help_scroll_box.setObjectName(\"help_scroll_box\")\n\n # Create help content\n self.help_content = QtWidgets.QWidget()\n self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))\n self.help_content.setObjectName(\"help_content\")\n self.help_scroll_box.setWidget(self.help_content)\n\n # Create selected files variable\n self.selected_files = {}\n\n # Set current tab\n self.input_output_box.setCurrentIndex(0)\n\n # Retranslate UI\n self.retranslateUI()\n\n # Connect UI slots\n QtCore.QMetaObject.connectSlotsByName(self.main_window)\n\n # ============================================== #\n\n # Define retranslateUI function\n def retranslateUI(self):\n # Add text to ui elements\n _translate = QtCore.QCoreApplication.translate\n self.main_window.setWindowTitle(_translate(\"main_window\", \"SentiCompare\"))\n self.add_button.setText(_translate(\"main_window\", \"Add\"))\n self.delete_button.setText(_translate(\"main_window\", \"Delete\"))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self.select_files_tab),\n _translate(\"main_window\", \"Select Files\"))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self.manual_input_tab),\n _translate(\"main_window\", \"Manual Input\"))\n self.input_output_box.setTabText(self.input_output_box.indexOf(self.results_tab),\n _translate(\"main_window\", \"Results\"))\n self.run_button.setText(_translate(\"main_window\", \"Run\"))\n self.quit_button.setText(_translate(\"main_window\", \"Quit\"))\n self.pronoun_checkbox.setText(_translate(\"main_window\", \"Pronoun Usage\"))\n self.lexical_checkbox.setText(_translate(\"main_window\", \"Lexical\"))\n self.rule_based_checkbox.setText(_translate(\"main_window\", \"Rule Based\"))\n self.machine_learning_checkbox.setText(_translate(\"main_window\", \"Machine Learning\"))\n self.branding_label.setText(_translate(\"main_window\", \"SentiCompare\"))\n\n # ============================================== #\n\n # Define showWindow function\n def showWindow(self):\n self.main_window.show()\n\n # ============================================== #\n\n # Define selectFiles function\n def selectFiles(self):\n # Create file dialog\n file_dialog = FileDialog(self.main_window)\n file_dialog.setFilters([\"Text files (*.txt)\"])\n file_dialog.setDefaultFilterIndex = 0\n file_dialog.setDefaultDirectory(os.path.expanduser('~'))\n file_dialog.exec()\n\n # Return if nothing was selected\n if file_dialog.getPath() == '':\n return\n\n # Add files from selected directory to file list\n elif file_dialog.getFilename()[2] == '':\n for file in os.listdir(file_dialog.getPath()):\n if file.endswith('.txt') and not file.startswith('.'):\n file_path = os.path.join(file_dialog.getPath(), file)\n\n if file_path not in self.selected_files:\n self.selected_files[file] = file_path\n\n item = QStandardItem(file)\n item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n # Add selected file to list\n else:\n if file_dialog.getPath() not in self.selected_files:\n self.selected_files[file_dialog.getFilename()[1]] = file_dialog.getPath()\n\n item = QStandardItem(file_dialog.getFilename()[1])\n 
item.setCheckable(True)\n self.file_view_model.appendRow(item)\n\n # ============================================== #\n\n # Define removeFiles function\n def removeFiles(self):\n # Remove all checked files\n for i in range(self.file_view_model.rowCount() - 1, -1, -1):\n if self.file_view_model.item(i).checkState():\n filename = self.file_view_model.item(i).text()\n del self.selected_files[filename]\n self.file_view_model.removeRow(i)\n\n # ============================================== #\n\n # Define run function\n def run(self):\n # Check if an analysis method is selected\n if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.isChecked() or\n self.rule_based_checkbox.isChecked() or self.machine_learning_checkbox.isChecked()):\n # Create and show an error message\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle(\"Missing Parameters\")\n message_box.setText(\"You haven't selected any methods of sentiment analysis. Please select at least one \" +\n \"method from the list of options.\")\n message_box.exec_()\n return\n\n # Check if the current tab is valid\n if self.input_output_box.currentIndex() == 2:\n # Create and show error message\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle(\"Select Input\")\n message_box.setText(\"You must be on the \\\"Select Files\\\" page or the \\\"Manual Input\\\" page to run \" +\n \"an analysis. Please select one of those pages and try again.\")\n message_box.exec_()\n return\n\n else:\n progress_bar = QtWidgets.QProgressDialog(\"Running Sentiment Analysis...\", \"Cancel\", 0, 100, self.main_window)\n progress_bar.setValue(0)\n progress_bar.setCancelButton(None)\n progress_bar.setWindowModality(QtCore.Qt.WindowModal)\n progress_bar.resize(400, 50)\n progress_bar.show()\n\n # Analyze selected files\n if self.input_output_box.currentIndex() == 0:\n sentiment_analyzer = SentimentAnalyzer(self.selected_files, progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(),\n rule_based=self.rule_based_checkbox.isChecked(),\n machine_learning=self.machine_learning_checkbox.isChecked())\n\n # Analyze manual input\n else:\n sentiment_analyzer = SentimentAnalyzer(self.text_input.toPlainText(), progress_bar, pronoun=self.pronoun_checkbox.isChecked(),\n lexical=self.lexical_checkbox.isChecked(),\n rule_based=self.rule_based_checkbox.isChecked(),\n machine_learning=self.machine_learning_checkbox.isChecked())\n\n results = sentiment_analyzer.runAnalyses()\n progress_bar.close()\n\n if results:\n self.results_content_text.setText(results)\n self.input_output_box.setTabEnabled(2, True)\n self.input_output_box.setCurrentIndex(2)\n\n else:\n message_box = QMessageBox()\n message_box.setIcon(QMessageBox.Warning)\n message_box.setWindowTitle(\"Missing Input\")\n message_box.setText(\"You haven't added any input to analyze. Please select one or more files or \" +\n \"input some data manually.\")\n message_box.exec_()\n return\n\n# ================================================== #\n# EOF #\n# ================================================== #\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import time
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
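# Factory: create and return a WebDriver for the named browser; prints a hint and returns None for unknown names.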
def open_browser(browser="chrome"):
driver = None
if browser == "chrome":
driver = webdriver.Chrome()
elif browser == "firefox":
driver = webdriver.Firefox()
elif browser == "ie":
driver = webdriver.Ie()
else:
        print("Please enter a valid browser, e.g. 'chrome', 'firefox', 'ie'")
return driver
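# Thin wrapper around a WebDriver instance, adding explicit-wait helpers.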
class Base:
def __init__(self, driver):
self.driver = driver
def open_url(self, url):
self.driver.get(url)
        self.driver.maximize_window()  # maximize the window
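    # Wait up to timeout seconds for a single element to be present, then return it.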
def find_element(self, locator, timeout=10):
element = WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(locator))
return element
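    # Wait until every element matching the locator is present, then return the list.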
def find_elements(self, locator, timeout=10):
elements = WebDriverWait(self.driver, timeout).until(EC.presence_of_all_elements_located(locator))
return elements
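    # Locate the element, then click it.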
def click(self, locator, timeout=10):
element = self.find_element(locator=locator, timeout=timeout)
element.click()
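    # Clear the field located by locator, then type the given text.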
def send_keys(self, locator, text, timeout=10):
element = self.find_element(locator=locator, timeout=timeout)
element.clear()
element.send_keys(text)
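    # True once the element's visible text contains the given string; False on timeout.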
def is_text_in_element(self, locator, text, timeout=10):
try:
result = WebDriverWait(self.driver, timeout=timeout).until(EC.text_to_be_present_in_element(locator, text))
return result
except:
return False
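    # Same check against the element's value attribute (useful for form fields).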
def is_value_in_element(self, locator, value, timeout=10):
try:
result = WebDriverWait(self.driver, timeout=timeout).until(
EC.text_to_be_present_in_element_value(locator, value))
return result
except:
return False
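    # Quit the browser and end the WebDriver session.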
def close_browser(self):
self.driver.quit()
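
# Example usage (sketch; the URL is a placeholder):
# driver = open_browser('chrome')
# page = Base(driver)
# page.open_url('https://example.com')
# page.close_browser()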
|
normal
|
{
"blob_id": "82fc86e44d02c45d7904139e4dfdff069e2bdb90",
"index": 5634,
"step-1": "<mask token>\n\n\nclass Base:\n <mask token>\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n <mask token>\n <mask token>\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Base:\n\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_all_elements_located(locator))\n return elements\n <mask token>\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n",
"step-3": "<mask token>\n\n\ndef open_browser(browser='chrome'):\n driver = None\n if browser == 'chrome':\n driver = webdriver.Chrome()\n elif browser == 'firefox':\n driver = webdriver.Firefox()\n elif browser == 'ie':\n driver = webdriver.Ie()\n else:\n print(\"请输入正确的浏览器,例如'chrome','Firefox','ie'\")\n return driver\n\n\nclass Base:\n\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_all_elements_located(locator))\n return elements\n\n def click(self, locator, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.click()\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n",
"step-4": "import time\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef open_browser(browser='chrome'):\n driver = None\n if browser == 'chrome':\n driver = webdriver.Chrome()\n elif browser == 'firefox':\n driver = webdriver.Firefox()\n elif browser == 'ie':\n driver = webdriver.Ie()\n else:\n print(\"请输入正确的浏览器,例如'chrome','Firefox','ie'\")\n return driver\n\n\nclass Base:\n\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_all_elements_located(locator))\n return elements\n\n def click(self, locator, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.click()\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n",
"step-5": "import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef open_browser(browser=\"chrome\"):\n driver = None\n if browser == \"chrome\":\n driver = webdriver.Chrome()\n elif browser == \"firefox\":\n driver = webdriver.Firefox()\n elif browser == \"ie\":\n driver = webdriver.Ie()\n else:\n # driver = None\n print(\"请输入正确的浏览器,例如'chrome','Firefox','ie'\")\n return driver\n\n\nclass Base:\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window() # 窗口最大化\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.presence_of_all_elements_located(locator))\n return elements\n\n def click(self, locator, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.click()\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(\n EC.text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n\n\n\n",
"step-ids": [
6,
9,
11,
12,
13
]
}
|
[
6,
9,
11,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trainer:
def html_escape(self, text):
        html_escape_table = {'"': '&quot;', "'": '&#39;'}
return escape(text, html_escape_table)
def train(self, preprocessedxml, xmlname):
f = open('../TrainingData/htmls/train' + xmlname + '.html', 'w')
f.write(
'<html><body><form action="http://localhost/cgi-bin/TableProcessor.py" method="post">'
)
f.write('<input type="hidden" name="xmlname" value="' + xmlname + '"/>'
)
i = 0
pageno = 0
colno = 0
for page in preprocessedxml:
f.write(
'<div class="page"><input type="hidden" name="pagebegin' +
str(pageno) + '" value="' + str(colno) + '"/>')
for col in page:
f.write(
'<div class="col"><input type="hidden" name="colbegin' +
str(colno) + '" value="' + str(i) + '"/>')
for tup in col:
f.write('<div><select id="docparams" name="docparams' +
str(i) + '">')
f.write('<option value="sparse">Sparse</option>')
f.write(
'<option value="nonsparse" selected="selected">Not Sparse</option>'
)
f.write("</select><input type='hidden' name='texttag" +
str(i) + "' value='" + self.html_escape(ET.tostring
(tup[1], 'utf-8', 'xml')) + "'/>" + ET.tostring(tup
[1]) + '</div>')
i += 1
f.write('<input type="hidden" name="colend' + str(colno) +
'" value="' + str(i) + '"/><div>')
colno += 1
f.write('<input type="hidden" name="pageend' + str(pageno) +
'" value="' + str(colno) + '"/> <div>')
pageno += 1
f.write('<input type="submit" value="Done!"/></form></body></html>')
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trainer:
def html_escape(self, text):
        html_escape_table = {'"': '&quot;', "'": '&#39;'}
return escape(text, html_escape_table)
def train(self, preprocessedxml, xmlname):
f = open('../TrainingData/htmls/train' + xmlname + '.html', 'w')
f.write(
'<html><body><form action="http://localhost/cgi-bin/TableProcessor.py" method="post">'
)
f.write('<input type="hidden" name="xmlname" value="' + xmlname + '"/>'
)
i = 0
pageno = 0
colno = 0
for page in preprocessedxml:
f.write(
'<div class="page"><input type="hidden" name="pagebegin' +
str(pageno) + '" value="' + str(colno) + '"/>')
for col in page:
f.write(
'<div class="col"><input type="hidden" name="colbegin' +
str(colno) + '" value="' + str(i) + '"/>')
for tup in col:
f.write('<div><select id="docparams" name="docparams' +
str(i) + '">')
f.write('<option value="sparse">Sparse</option>')
f.write(
'<option value="nonsparse" selected="selected">Not Sparse</option>'
)
f.write("</select><input type='hidden' name='texttag" +
str(i) + "' value='" + self.html_escape(ET.tostring
(tup[1], 'utf-8', 'xml')) + "'/>" + ET.tostring(tup
[1]) + '</div>')
i += 1
f.write('<input type="hidden" name="colend' + str(colno) +
'" value="' + str(i) + '"/><div>')
colno += 1
f.write('<input type="hidden" name="pageend' + str(pageno) +
'" value="' + str(colno) + '"/> <div>')
pageno += 1
f.write('<input type="submit" value="Done!"/></form></body></html>')
f.close()
<|reserved_special_token_0|>
def readAnnotatedxmlforTableDecomposition(self, xmlname):
f = open(xmlname)
table = list()
for line in f:
if line.strip() == '':
continue
tup0 = line[:line.find('\t')]
tup1 = line[line.find('\t') + 1:]
table.append([tup0, ET.fromstring(tup1)])
return table
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
<|reserved_special_token_0|>
class Trainer:
def html_escape(self, text):
        html_escape_table = {'"': '&quot;', "'": '&#39;'}
return escape(text, html_escape_table)
def train(self, preprocessedxml, xmlname):
f = open('../TrainingData/htmls/train' + xmlname + '.html', 'w')
f.write(
'<html><body><form action="http://localhost/cgi-bin/TableProcessor.py" method="post">'
)
f.write('<input type="hidden" name="xmlname" value="' + xmlname + '"/>'
)
i = 0
pageno = 0
colno = 0
for page in preprocessedxml:
f.write(
'<div class="page"><input type="hidden" name="pagebegin' +
str(pageno) + '" value="' + str(colno) + '"/>')
for col in page:
f.write(
'<div class="col"><input type="hidden" name="colbegin' +
str(colno) + '" value="' + str(i) + '"/>')
for tup in col:
f.write('<div><select id="docparams" name="docparams' +
str(i) + '">')
f.write('<option value="sparse">Sparse</option>')
f.write(
'<option value="nonsparse" selected="selected">Not Sparse</option>'
)
f.write("</select><input type='hidden' name='texttag" +
str(i) + "' value='" + self.html_escape(ET.tostring
(tup[1], 'utf-8', 'xml')) + "'/>" + ET.tostring(tup
[1]) + '</div>')
i += 1
f.write('<input type="hidden" name="colend' + str(colno) +
'" value="' + str(i) + '"/><div>')
colno += 1
f.write('<input type="hidden" name="pageend' + str(pageno) +
'" value="' + str(colno) + '"/> <div>')
pageno += 1
f.write('<input type="submit" value="Done!"/></form></body></html>')
f.close()
def readAnnotatedXml(self, xmlname):
f = open(xmlname)
preprocessedxml = list()
col = list()
for line in f:
            if line == '=============================== PAGE ===================================\n':
pagelist = list()
preprocessedxml.append(pagelist)
            elif line == '=============================== COL ===================================\n':
col = list()
pagelist.append(col)
else:
tup0 = line[:line.find(' ')]
tup1 = line[line.find(' ') + 1:]
col.append([tup0, ET.fromstring(tup1)])
return preprocessedxml
def readAnnotatedxmlforTableDecomposition(self, xmlname):
f = open(xmlname)
table = list()
for line in f:
if line.strip() == '':
continue
tup0 = line[:line.find('\t')]
tup1 = line[line.find('\t') + 1:]
table.append([tup0, ET.fromstring(tup1)])
return table
<|reserved_special_token_1|>
'''
Created on Nov 20, 2012
@author: shriram
'''
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
'''
Annotating only Sparse and Non Sparse Lines
'''
class Trainer:
def html_escape(self,text):
html_escape_table = {
'"': """,
"'": "'"
}
return escape(text, html_escape_table)
def train(self, preprocessedxml, xmlname):
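        # emit one <select> per extracted text line so a human can label it Sparse / Not Sparse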
f = open('../TrainingData/htmls/train'+xmlname+'.html','w')
f.write('<html><body><form action="http://localhost/cgi-bin/TableProcessor.py" method="post">')
f.write('<input type="hidden" name="xmlname" value="'+xmlname +'"/>')
i = 0
pageno = 0
colno = 0
for page in preprocessedxml:
f.write('<div class="page"><input type="hidden" name="pagebegin'+str(pageno)+'" value="'+str(colno)+'"/>')
for col in page:
f.write('<div class="col"><input type="hidden" name="colbegin'+str(colno)+'" value="'+str(i)+'"/>')
for tup in col:
f.write('<div><select id="docparams" name="docparams'+ str(i) +'">')
f.write('<option value="sparse">Sparse</option>')
f.write('<option value="nonsparse" selected="selected">Not Sparse</option>')
f.write("</select><input type='hidden' name='texttag"+str(i)+"' value='"+ self.html_escape(ET.tostring(tup[1],'utf-8',"xml")) + "'/>"+ ET.tostring(tup[1]) +"</div>")
i += 1
f.write('<input type="hidden" name="colend'+str(colno)+'" value="'+str(i)+'"/><div>')
colno += 1
f.write('<input type="hidden" name="pageend'+str(pageno)+'" value="'+str(colno)+'"/> <div>')
pageno += 1
f.write('<input type="submit" value="Done!"/></form></body></html>')
f.close()
def readAnnotatedXml(self,xmlname):
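        # pages and columns in the annotated file are delimited by "=== PAGE ===" / "=== COL ===" sentinel lines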
f = open(xmlname)
preprocessedxml = list()
col = list()
for line in f:
if(line == "=============================== PAGE ===================================\n"):
pagelist = list()
preprocessedxml.append(pagelist)
elif(line == "=============================== COL ===================================\n"):
col = list()
pagelist.append(col)
else:
tup0 = line[:line.find(" ")]
tup1 = line[line.find(" ")+1:]
col.append([tup0,ET.fromstring(tup1)])
return preprocessedxml
def readAnnotatedxmlforTableDecomposition(self, xmlname):
f = open(xmlname)
table = list()
for line in f:
if(line.strip() == ''):
continue
tup0 = line[:line.find("\t")]
tup1 = line[line.find("\t")+1:]
table.append([tup0,ET.fromstring(tup1)])
return table
|
flexible
|
{
"blob_id": "22e24e8dd49367ae57d1980c4addf48d65c5e897",
"index": 7851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Trainer:\n\n def html_escape(self, text):\n html_escape_table = {'\"': '"', \"'\": '''}\n return escape(text, html_escape_table)\n\n def train(self, preprocessedxml, xmlname):\n f = open('../TrainingData/htmls/train' + xmlname + '.html', 'w')\n f.write(\n '<html><body><form action=\"http://localhost/cgi-bin/TableProcessor.py\" method=\"post\">'\n )\n f.write('<input type=\"hidden\" name=\"xmlname\" value=\"' + xmlname + '\"/>'\n )\n i = 0\n pageno = 0\n colno = 0\n for page in preprocessedxml:\n f.write(\n '<div class=\"page\"><input type=\"hidden\" name=\"pagebegin' +\n str(pageno) + '\" value=\"' + str(colno) + '\"/>')\n for col in page:\n f.write(\n '<div class=\"col\"><input type=\"hidden\" name=\"colbegin' +\n str(colno) + '\" value=\"' + str(i) + '\"/>')\n for tup in col:\n f.write('<div><select id=\"docparams\" name=\"docparams' +\n str(i) + '\">')\n f.write('<option value=\"sparse\">Sparse</option>')\n f.write(\n '<option value=\"nonsparse\" selected=\"selected\">Not Sparse</option>'\n )\n f.write(\"</select><input type='hidden' name='texttag\" +\n str(i) + \"' value='\" + self.html_escape(ET.tostring\n (tup[1], 'utf-8', 'xml')) + \"'/>\" + ET.tostring(tup\n [1]) + '</div>')\n i += 1\n f.write('<input type=\"hidden\" name=\"colend' + str(colno) +\n '\" value=\"' + str(i) + '\"/><div>')\n colno += 1\n f.write('<input type=\"hidden\" name=\"pageend' + str(pageno) +\n '\" value=\"' + str(colno) + '\"/> <div>')\n pageno += 1\n f.write('<input type=\"submit\" value=\"Done!\"/></form></body></html>')\n f.close()\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Trainer:\n\n def html_escape(self, text):\n html_escape_table = {'\"': '"', \"'\": '''}\n return escape(text, html_escape_table)\n\n def train(self, preprocessedxml, xmlname):\n f = open('../TrainingData/htmls/train' + xmlname + '.html', 'w')\n f.write(\n '<html><body><form action=\"http://localhost/cgi-bin/TableProcessor.py\" method=\"post\">'\n )\n f.write('<input type=\"hidden\" name=\"xmlname\" value=\"' + xmlname + '\"/>'\n )\n i = 0\n pageno = 0\n colno = 0\n for page in preprocessedxml:\n f.write(\n '<div class=\"page\"><input type=\"hidden\" name=\"pagebegin' +\n str(pageno) + '\" value=\"' + str(colno) + '\"/>')\n for col in page:\n f.write(\n '<div class=\"col\"><input type=\"hidden\" name=\"colbegin' +\n str(colno) + '\" value=\"' + str(i) + '\"/>')\n for tup in col:\n f.write('<div><select id=\"docparams\" name=\"docparams' +\n str(i) + '\">')\n f.write('<option value=\"sparse\">Sparse</option>')\n f.write(\n '<option value=\"nonsparse\" selected=\"selected\">Not Sparse</option>'\n )\n f.write(\"</select><input type='hidden' name='texttag\" +\n str(i) + \"' value='\" + self.html_escape(ET.tostring\n (tup[1], 'utf-8', 'xml')) + \"'/>\" + ET.tostring(tup\n [1]) + '</div>')\n i += 1\n f.write('<input type=\"hidden\" name=\"colend' + str(colno) +\n '\" value=\"' + str(i) + '\"/><div>')\n colno += 1\n f.write('<input type=\"hidden\" name=\"pageend' + str(pageno) +\n '\" value=\"' + str(colno) + '\"/> <div>')\n pageno += 1\n f.write('<input type=\"submit\" value=\"Done!\"/></form></body></html>')\n f.close()\n <mask token>\n\n def readAnnotatedxmlforTableDecomposition(self, xmlname):\n f = open(xmlname)\n table = list()\n for line in f:\n if line.strip() == '':\n continue\n tup0 = line[:line.find('\\t')]\n tup1 = line[line.find('\\t') + 1:]\n table.append([tup0, ET.fromstring(tup1)])\n return table\n",
"step-4": "<mask token>\nimport xml.etree.ElementTree as ET\nfrom xml.sax.saxutils import escape\n<mask token>\n\n\nclass Trainer:\n\n def html_escape(self, text):\n html_escape_table = {'\"': '"', \"'\": '''}\n return escape(text, html_escape_table)\n\n def train(self, preprocessedxml, xmlname):\n f = open('../TrainingData/htmls/train' + xmlname + '.html', 'w')\n f.write(\n '<html><body><form action=\"http://localhost/cgi-bin/TableProcessor.py\" method=\"post\">'\n )\n f.write('<input type=\"hidden\" name=\"xmlname\" value=\"' + xmlname + '\"/>'\n )\n i = 0\n pageno = 0\n colno = 0\n for page in preprocessedxml:\n f.write(\n '<div class=\"page\"><input type=\"hidden\" name=\"pagebegin' +\n str(pageno) + '\" value=\"' + str(colno) + '\"/>')\n for col in page:\n f.write(\n '<div class=\"col\"><input type=\"hidden\" name=\"colbegin' +\n str(colno) + '\" value=\"' + str(i) + '\"/>')\n for tup in col:\n f.write('<div><select id=\"docparams\" name=\"docparams' +\n str(i) + '\">')\n f.write('<option value=\"sparse\">Sparse</option>')\n f.write(\n '<option value=\"nonsparse\" selected=\"selected\">Not Sparse</option>'\n )\n f.write(\"</select><input type='hidden' name='texttag\" +\n str(i) + \"' value='\" + self.html_escape(ET.tostring\n (tup[1], 'utf-8', 'xml')) + \"'/>\" + ET.tostring(tup\n [1]) + '</div>')\n i += 1\n f.write('<input type=\"hidden\" name=\"colend' + str(colno) +\n '\" value=\"' + str(i) + '\"/><div>')\n colno += 1\n f.write('<input type=\"hidden\" name=\"pageend' + str(pageno) +\n '\" value=\"' + str(colno) + '\"/> <div>')\n pageno += 1\n f.write('<input type=\"submit\" value=\"Done!\"/></form></body></html>')\n f.close()\n\n def readAnnotatedXml(self, xmlname):\n f = open(xmlname)\n preprocessedxml = list()\n col = list()\n for line in f:\n if (line ==\n \"\"\"=============================== PAGE ===================================\n\"\"\"\n ):\n pagelist = list()\n preprocessedxml.append(pagelist)\n elif line == \"\"\"=============================== COL ===================================\n\"\"\":\n col = list()\n pagelist.append(col)\n else:\n tup0 = line[:line.find(' ')]\n tup1 = line[line.find(' ') + 1:]\n col.append([tup0, ET.fromstring(tup1)])\n return preprocessedxml\n\n def readAnnotatedxmlforTableDecomposition(self, xmlname):\n f = open(xmlname)\n table = list()\n for line in f:\n if line.strip() == '':\n continue\n tup0 = line[:line.find('\\t')]\n tup1 = line[line.find('\\t') + 1:]\n table.append([tup0, ET.fromstring(tup1)])\n return table\n",
"step-5": "'''\nCreated on Nov 20, 2012\n\n@author: shriram\n'''\nimport xml.etree.ElementTree as ET\nfrom xml.sax.saxutils import escape\n\n'''\n Annotating only Sparse and Non Sparse Lines\n'''\nclass Trainer:\n def html_escape(self,text):\n html_escape_table = {\n '\"': \""\",\n \"'\": \"'\"\n }\n return escape(text, html_escape_table)\n \n def train(self, preprocessedxml, xmlname):\n f = open('../TrainingData/htmls/train'+xmlname+'.html','w')\n f.write('<html><body><form action=\"http://localhost/cgi-bin/TableProcessor.py\" method=\"post\">')\n f.write('<input type=\"hidden\" name=\"xmlname\" value=\"'+xmlname +'\"/>')\n i = 0\n pageno = 0\n colno = 0\n for page in preprocessedxml:\n f.write('<div class=\"page\"><input type=\"hidden\" name=\"pagebegin'+str(pageno)+'\" value=\"'+str(colno)+'\"/>')\n for col in page:\n f.write('<div class=\"col\"><input type=\"hidden\" name=\"colbegin'+str(colno)+'\" value=\"'+str(i)+'\"/>')\n for tup in col:\n f.write('<div><select id=\"docparams\" name=\"docparams'+ str(i) +'\">')\n f.write('<option value=\"sparse\">Sparse</option>')\n f.write('<option value=\"nonsparse\" selected=\"selected\">Not Sparse</option>')\n f.write(\"</select><input type='hidden' name='texttag\"+str(i)+\"' value='\"+ self.html_escape(ET.tostring(tup[1],'utf-8',\"xml\")) + \"'/>\"+ ET.tostring(tup[1]) +\"</div>\")\n i += 1\n f.write('<input type=\"hidden\" name=\"colend'+str(colno)+'\" value=\"'+str(i)+'\"/><div>')\n colno += 1\n f.write('<input type=\"hidden\" name=\"pageend'+str(pageno)+'\" value=\"'+str(colno)+'\"/> <div>')\n pageno += 1\n f.write('<input type=\"submit\" value=\"Done!\"/></form></body></html>')\n f.close()\n \n def readAnnotatedXml(self,xmlname):\n f = open(xmlname)\n preprocessedxml = list()\n col = list()\n for line in f:\n if(line == \"=============================== PAGE ===================================\\n\"):\n pagelist = list()\n preprocessedxml.append(pagelist)\n elif(line == \"=============================== COL ===================================\\n\"):\n col = list()\n pagelist.append(col)\n else:\n tup0 = line[:line.find(\" \")]\n tup1 = line[line.find(\" \")+1:]\n col.append([tup0,ET.fromstring(tup1)])\n \n return preprocessedxml\n \n def readAnnotatedxmlforTableDecomposition(self, xmlname):\n f = open(xmlname)\n table = list()\n for line in f:\n if(line.strip() == ''):\n continue\n tup0 = line[:line.find(\"\\t\")]\n tup1 = line[line.find(\"\\t\")+1:]\n table.append([tup0,ET.fromstring(tup1)])\n return table\n \n \n \n \n \n ",
"step-ids": [
0,
3,
4,
6,
7
]
}
|
[
0,
3,
4,
6,
7
] |
<|reserved_special_token_0|>
def func(x):
return x ** c
def der_func(x):
return c * x ** (c - 1)
<|reserved_special_token_0|>
def main():
x = 100
v_min = func(x)
for i in range(10):
cur_v = func(x)
x = na_value(x)
if cur_v < v_min:
v_min = cur_v
print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def func(x):
return x ** c
def der_func(x):
return c * x ** (c - 1)
def na_value(x):
return x - n * der_func(x)
def main():
x = 100
v_min = func(x)
for i in range(10):
cur_v = func(x)
x = na_value(x)
if cur_v < v_min:
v_min = cur_v
print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def func(x):
return x ** c
def der_func(x):
return c * x ** (c - 1)
def na_value(x):
return x - n * der_func(x)
def main():
x = 100
v_min = func(x)
for i in range(10):
cur_v = func(x)
x = na_value(x)
if cur_v < v_min:
v_min = cur_v
print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)
main()
<|reserved_special_token_1|>
n = 0.3
c = 2
def func(x):
return x ** c
def der_func(x):
return c * x ** (c - 1)
def na_value(x):
return x - n * der_func(x)
def main():
x = 100
v_min = func(x)
for i in range(10):
cur_v = func(x)
x = na_value(x)
if cur_v < v_min:
v_min = cur_v
print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)
main()
<|reserved_special_token_1|>
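# minimise f(x) = x**c by gradient descent; n is the learning rate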
n = 0.3
c = 2
def func(x):
return x**c
def der_func(x):
return c * x**(c - 1)
def na_value(x):
return x - n*der_func(x)
def main():
x = 100
v_min = func(x)
for i in range(10):
cur_v = func(x)
x = na_value(x)
if cur_v < v_min:
v_min = cur_v
print("----> " ,i ," cur = ",cur_v," x = ",x," v_min = " ,v_min )
main()
|
flexible
|
{
"blob_id": "fa7246a4e7595393ca9aaec777fa85d782bb816e",
"index": 4815,
"step-1": "<mask token>\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\n<mask token>\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\ndef na_value(x):\n return x - n * der_func(x)\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\ndef na_value(x):\n return x - n * der_func(x)\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\nmain()\n",
"step-4": "n = 0.3\nc = 2\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\ndef na_value(x):\n return x - n * der_func(x)\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\nmain()\n",
"step-5": "\nn = 0.3\nc = 2\n\ndef func(x):\n return x**c \n\ndef der_func(x):\n return c * x**(c - 1)\n\ndef na_value(x):\n return x - n*der_func(x)\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print(\"----> \" ,i ,\" cur = \",cur_v,\" x = \",x,\" v_min = \" ,v_min )\n\n\nmain()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/bin/python3
import sys
from collections import deque
def connectedCell(matrix,n,m):
# Complete this function
visit = []
for j in range(n):
a = []
for i in range(m):
a.append(True)
visit.append(a)
#print(visit)
path = 0
for i in range(n):
for j in range(m):
if visit[i][j]:
count = 0
#visit[i_ind][j_ind] =
nodes = deque([(i,j)])
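                # depth-first flood fill over the 8-connected neighbourhood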
while nodes:
i_ind, j_ind = nodes.pop()
#visit[i_ind][j_ind] = False
#print(i_ind,j_ind )
if 0 <= i_ind < n and 0 <= j_ind < m and visit[i_ind][j_ind]:
#print(i_ind, j_ind)
visit[i_ind][j_ind] = False
if matrix[i_ind][j_ind] == 1:
count += 1
nodes_list = [(i_ind -1, j_ind-1),
(i_ind -1, j_ind),
(i_ind -1, j_ind+1),
(i_ind, j_ind-1),
(i_ind, j_ind+1),
(i_ind +1, j_ind-1),
(i_ind +1, j_ind),
(i_ind +1, j_ind+1)]
#print(*nodes_list)
nodes.extend(nodes_list)
if count > path:
path = count
return path
# if __name__ == "__main__":
# n = int(input().strip())
# m = int(input().strip())
# matrix = []
# for matrix_i in range(n):
# matrix_t = [int(matrix_temp) for matrix_temp in input().strip().split(' ')]
# matrix.append(matrix_t)
# result = connectedCell(matrix,n,m)
# print(result)
n = 2
m = 2
matrix = [[1]*n]*m
result = connectedCell(matrix,n,m)
print('result = ',result)
|
normal
|
{
"blob_id": "25a159ca2abf0176135086324ab355d6f5d9fe9e",
"index": 5054,
"step-1": "<mask token>\n\n\ndef connectedCell(matrix, n, m):\n visit = []\n for j in range(n):\n a = []\n for i in range(m):\n a.append(True)\n visit.append(a)\n path = 0\n for i in range(n):\n for j in range(m):\n if visit[i][j]:\n count = 0\n nodes = deque([(i, j)])\n while nodes:\n i_ind, j_ind = nodes.pop()\n if 0 <= i_ind < n and 0 <= j_ind < m and visit[i_ind][j_ind\n ]:\n visit[i_ind][j_ind] = False\n if matrix[i_ind][j_ind] == 1:\n count += 1\n nodes_list = [(i_ind - 1, j_ind - 1), (i_ind - \n 1, j_ind), (i_ind - 1, j_ind + 1), (i_ind, \n j_ind - 1), (i_ind, j_ind + 1), (i_ind + 1,\n j_ind - 1), (i_ind + 1, j_ind), (i_ind + 1,\n j_ind + 1)]\n nodes.extend(nodes_list)\n if count > path:\n path = count\n return path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef connectedCell(matrix, n, m):\n visit = []\n for j in range(n):\n a = []\n for i in range(m):\n a.append(True)\n visit.append(a)\n path = 0\n for i in range(n):\n for j in range(m):\n if visit[i][j]:\n count = 0\n nodes = deque([(i, j)])\n while nodes:\n i_ind, j_ind = nodes.pop()\n if 0 <= i_ind < n and 0 <= j_ind < m and visit[i_ind][j_ind\n ]:\n visit[i_ind][j_ind] = False\n if matrix[i_ind][j_ind] == 1:\n count += 1\n nodes_list = [(i_ind - 1, j_ind - 1), (i_ind - \n 1, j_ind), (i_ind - 1, j_ind + 1), (i_ind, \n j_ind - 1), (i_ind, j_ind + 1), (i_ind + 1,\n j_ind - 1), (i_ind + 1, j_ind), (i_ind + 1,\n j_ind + 1)]\n nodes.extend(nodes_list)\n if count > path:\n path = count\n return path\n\n\n<mask token>\nprint('result = ', result)\n",
"step-3": "<mask token>\n\n\ndef connectedCell(matrix, n, m):\n visit = []\n for j in range(n):\n a = []\n for i in range(m):\n a.append(True)\n visit.append(a)\n path = 0\n for i in range(n):\n for j in range(m):\n if visit[i][j]:\n count = 0\n nodes = deque([(i, j)])\n while nodes:\n i_ind, j_ind = nodes.pop()\n if 0 <= i_ind < n and 0 <= j_ind < m and visit[i_ind][j_ind\n ]:\n visit[i_ind][j_ind] = False\n if matrix[i_ind][j_ind] == 1:\n count += 1\n nodes_list = [(i_ind - 1, j_ind - 1), (i_ind - \n 1, j_ind), (i_ind - 1, j_ind + 1), (i_ind, \n j_ind - 1), (i_ind, j_ind + 1), (i_ind + 1,\n j_ind - 1), (i_ind + 1, j_ind), (i_ind + 1,\n j_ind + 1)]\n nodes.extend(nodes_list)\n if count > path:\n path = count\n return path\n\n\nn = 2\nm = 2\nmatrix = [[1] * n] * m\nresult = connectedCell(matrix, n, m)\nprint('result = ', result)\n",
"step-4": "import sys\nfrom collections import deque\n\n\ndef connectedCell(matrix, n, m):\n visit = []\n for j in range(n):\n a = []\n for i in range(m):\n a.append(True)\n visit.append(a)\n path = 0\n for i in range(n):\n for j in range(m):\n if visit[i][j]:\n count = 0\n nodes = deque([(i, j)])\n while nodes:\n i_ind, j_ind = nodes.pop()\n if 0 <= i_ind < n and 0 <= j_ind < m and visit[i_ind][j_ind\n ]:\n visit[i_ind][j_ind] = False\n if matrix[i_ind][j_ind] == 1:\n count += 1\n nodes_list = [(i_ind - 1, j_ind - 1), (i_ind - \n 1, j_ind), (i_ind - 1, j_ind + 1), (i_ind, \n j_ind - 1), (i_ind, j_ind + 1), (i_ind + 1,\n j_ind - 1), (i_ind + 1, j_ind), (i_ind + 1,\n j_ind + 1)]\n nodes.extend(nodes_list)\n if count > path:\n path = count\n return path\n\n\nn = 2\nm = 2\nmatrix = [[1] * n] * m\nresult = connectedCell(matrix, n, m)\nprint('result = ', result)\n",
"step-5": "#!/bin/python3\n\nimport sys\nfrom collections import deque\n\ndef connectedCell(matrix,n,m):\n # Complete this function\n visit = []\n for j in range(n):\n a = []\n for i in range(m):\n a.append(True)\n visit.append(a)\n #print(visit)\n path = 0\n for i in range(n):\n for j in range(m):\n if visit[i][j]:\n count = 0\n #visit[i_ind][j_ind] = \n nodes = deque([(i,j)])\n while nodes:\n i_ind, j_ind = nodes.pop()\n #visit[i_ind][j_ind] = False\n #print(i_ind,j_ind )\n if 0 <= i_ind < n and 0 <= j_ind < m and visit[i_ind][j_ind]:\n #print(i_ind, j_ind)\n visit[i_ind][j_ind] = False\n if matrix[i_ind][j_ind] == 1:\n count += 1\n nodes_list = [(i_ind -1, j_ind-1),\n (i_ind -1, j_ind),\n (i_ind -1, j_ind+1), \n (i_ind, j_ind-1),\n (i_ind, j_ind+1),\n (i_ind +1, j_ind-1),\n (i_ind +1, j_ind),\n (i_ind +1, j_ind+1)]\n #print(*nodes_list)\n nodes.extend(nodes_list)\n if count > path:\n path = count\n return path\n \n \n \n \n\n# if __name__ == \"__main__\":\n# n = int(input().strip())\n# m = int(input().strip())\n# matrix = []\n# for matrix_i in range(n):\n# matrix_t = [int(matrix_temp) for matrix_temp in input().strip().split(' ')]\n# matrix.append(matrix_t)\n# result = connectedCell(matrix,n,m)\n# print(result)\nn = 2\nm = 2\nmatrix = [[1]*n]*m\nresult = connectedCell(matrix,n,m)\nprint('result = ',result)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from utilities.MatplotlibUtility import *
from utilities.PlotDefinitions.DrainSweep.OutputCurve import plot as importedOutputCurvePlot
plotDescription = {
'name':'Chip Output Curves',
'plotCategory': 'chip',
'priority': 40,
'dataFileDependencies': ['DrainSweep.json'],
'plotDefaults': {
'figsize':(2,2.5),
'colorMap':'magma',
},
}
def plot(identifiers, chipIndexes, firstRunChipHistory, recentRunChipHistory, specificRunChipHistory, groupedChipHistory, mode_parameters=None):
if(mode_parameters is None):
mode_parameters = {}
#mode_parameters['enableColorBar'] = False
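	# fall back to this plot's defaults only where the caller supplied nothing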
mode_parameters['colorsOverride'] = (plotDescription['plotDefaults']['colorMap'], 0.85, 0) if(mode_parameters['colorsOverride'] == []) else mode_parameters['colorsOverride']
mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults']['figsize'] if(mode_parameters['figureSizeOverride'] is None) else mode_parameters['figureSizeOverride']
return importedOutputCurvePlot(specificRunChipHistory, identifiers=identifiers, mode_parameters=mode_parameters)
|
normal
|
{
"blob_id": "49ae9e90402d784fc3af3b47e96842fbfe842104",
"index": 9480,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory,\n recentRunChipHistory, specificRunChipHistory, groupedChipHistory,\n mode_parameters=None):\n if mode_parameters is None:\n mode_parameters = {}\n mode_parameters['colorsOverride'] = (plotDescription['plotDefaults'][\n 'colorMap'], 0.85, 0) if mode_parameters['colorsOverride'] == [\n ] else mode_parameters['colorsOverride']\n mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults'][\n 'figsize'] if mode_parameters['figureSizeOverride'\n ] is None else mode_parameters['figureSizeOverride']\n return importedOutputCurvePlot(specificRunChipHistory, identifiers=\n identifiers, mode_parameters=mode_parameters)\n",
"step-3": "<mask token>\nplotDescription = {'name': 'Chip Output Curves', 'plotCategory': 'chip',\n 'priority': 40, 'dataFileDependencies': ['DrainSweep.json'],\n 'plotDefaults': {'figsize': (2, 2.5), 'colorMap': 'magma'}}\n\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory,\n recentRunChipHistory, specificRunChipHistory, groupedChipHistory,\n mode_parameters=None):\n if mode_parameters is None:\n mode_parameters = {}\n mode_parameters['colorsOverride'] = (plotDescription['plotDefaults'][\n 'colorMap'], 0.85, 0) if mode_parameters['colorsOverride'] == [\n ] else mode_parameters['colorsOverride']\n mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults'][\n 'figsize'] if mode_parameters['figureSizeOverride'\n ] is None else mode_parameters['figureSizeOverride']\n return importedOutputCurvePlot(specificRunChipHistory, identifiers=\n identifiers, mode_parameters=mode_parameters)\n",
"step-4": "from utilities.MatplotlibUtility import *\nfrom utilities.PlotDefinitions.DrainSweep.OutputCurve import plot as importedOutputCurvePlot\nplotDescription = {'name': 'Chip Output Curves', 'plotCategory': 'chip',\n 'priority': 40, 'dataFileDependencies': ['DrainSweep.json'],\n 'plotDefaults': {'figsize': (2, 2.5), 'colorMap': 'magma'}}\n\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory,\n recentRunChipHistory, specificRunChipHistory, groupedChipHistory,\n mode_parameters=None):\n if mode_parameters is None:\n mode_parameters = {}\n mode_parameters['colorsOverride'] = (plotDescription['plotDefaults'][\n 'colorMap'], 0.85, 0) if mode_parameters['colorsOverride'] == [\n ] else mode_parameters['colorsOverride']\n mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults'][\n 'figsize'] if mode_parameters['figureSizeOverride'\n ] is None else mode_parameters['figureSizeOverride']\n return importedOutputCurvePlot(specificRunChipHistory, identifiers=\n identifiers, mode_parameters=mode_parameters)\n",
"step-5": "from utilities.MatplotlibUtility import *\nfrom utilities.PlotDefinitions.DrainSweep.OutputCurve import plot as importedOutputCurvePlot\n\n\nplotDescription = {\n\t'name':'Chip Output Curves',\n\t'plotCategory': 'chip',\n\t'priority': 40,\n\t'dataFileDependencies': ['DrainSweep.json'],\n\t'plotDefaults': {\n\t\t'figsize':(2,2.5),\n\t\t'colorMap':'magma',\n\t},\n}\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory, recentRunChipHistory, specificRunChipHistory, groupedChipHistory, mode_parameters=None):\n\tif(mode_parameters is None):\n\t\tmode_parameters = {}\n\t#mode_parameters['enableColorBar'] = False\n\tmode_parameters['colorsOverride'] = (plotDescription['plotDefaults']['colorMap'], 0.85, 0) if(mode_parameters['colorsOverride'] == []) else mode_parameters['colorsOverride']\n\tmode_parameters['figureSizeOverride'] = plotDescription['plotDefaults']['figsize'] \t\t if(mode_parameters['figureSizeOverride'] is None) else mode_parameters['figureSizeOverride']\n\t\n\treturn importedOutputCurvePlot(specificRunChipHistory, identifiers=identifiers, mode_parameters=mode_parameters)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
WOO_HOST = os.environ.get('WOO_HOST')
#WooCommerce key credentials
WOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')
WOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')
#XML feed fields and settings
XML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')
XML_SITE_NAME = os.environ.get('XML_SITE_NAME')
XML_SITE_HOST = os.environ.get('XML_SITE_HOST')
XML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION', 'Feed XML autogenerated')
XML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')
PRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')
CRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')
REDIS_HOST = os.environ.get('REDIS_HOST', 'redis')
SENTRY_URL = os.environ.get('SENTRY_URL')
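# optional local overrides: local_settings.py, if present, replaces any value above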
try:
from local_settings import *
except ImportError:
pass
if SENTRY_URL:
import sentry_sdk
sentry_sdk.init(SENTRY_URL)
|
normal
|
{
"blob_id": "386fa51b9b285d36c75d6446f9348f6713e0dbaa",
"index": 2794,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from local_settings import *\nexcept ImportError:\n pass\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-3": "<mask token>\nWOO_HOST = os.environ.get('WOO_HOST')\nWOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')\nWOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')\nXML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')\nXML_SITE_NAME = os.environ.get('XML_SITE_NAME')\nXML_SITE_HOST = os.environ.get('XML_SITE_HOST')\nXML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION',\n 'Feed XML autogenerated')\nXML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')\nPRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')\nCRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')\nREDIS_HOST = os.environ.get('REDIS_HOST', 'redis')\nSENTRY_URL = os.environ.get('SENTRY_URL')\ntry:\n from local_settings import *\nexcept ImportError:\n pass\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-4": "import os\nWOO_HOST = os.environ.get('WOO_HOST')\nWOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')\nWOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')\nXML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')\nXML_SITE_NAME = os.environ.get('XML_SITE_NAME')\nXML_SITE_HOST = os.environ.get('XML_SITE_HOST')\nXML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION',\n 'Feed XML autogenerated')\nXML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')\nPRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')\nCRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')\nREDIS_HOST = os.environ.get('REDIS_HOST', 'redis')\nSENTRY_URL = os.environ.get('SENTRY_URL')\ntry:\n from local_settings import *\nexcept ImportError:\n pass\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-5": "import os\n\nWOO_HOST = os.environ.get('WOO_HOST')\n\n#WooCommerce key credentials\nWOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')\nWOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')\n\n#XML feed fields and settings\nXML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')\nXML_SITE_NAME = os.environ.get('XML_SITE_NAME')\nXML_SITE_HOST = os.environ.get('XML_SITE_HOST')\nXML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION', 'Feed XML autogenerated')\nXML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')\n\nPRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')\n\nCRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')\n\nREDIS_HOST = os.environ.get('REDIS_HOST', 'redis')\n\nSENTRY_URL = os.environ.get('SENTRY_URL')\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.namedWindow('mathches', 1)
cv2.imshow('mathches', a)
cv2.waitKey()
<|reserved_special_token_0|>
for m, n in matches:
if m.distance < 0.45 * n.distance:
good.append(m)
print(len(good))
<|reserved_special_token_0|>
cv2.namedWindow('result', 1)
cv2.imshow('result', result)
cv2.waitKey()
<|reserved_special_token_0|>
print(len(src_pts), len(dst_pts))
<|reserved_special_token_0|>
print('H:', H)
<|reserved_special_token_0|>
print('shft:', shft)
<|reserved_special_token_0|>
print('M:', M)
<|reserved_special_token_0|>
cv2.namedWindow('tiledImg1', cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg1', dst_corners)
cv2.waitKey()
<|reserved_special_token_0|>
cv2.namedWindow('tiledImg', cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg', dst_corners)
cv2.waitKey()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
leftgray = cv2.imread('../image/1.jpg')
rightgray = cv2.imread('../image/2.jpg')
hessian = 500
surf = cv2.xfeatures2d.SURF_create(hessian)
kp1, des1 = surf.detectAndCompute(leftgray, None)
kp2, des2 = surf.detectAndCompute(rightgray, None)
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
a = cv2.drawMatchesKnn(leftgray, kp1, rightgray, kp2, matches, None, flags=2)
cv2.namedWindow('mathches', 1)
cv2.imshow('mathches', a)
cv2.waitKey()
good = []
for m, n in matches:
if m.distance < 0.45 * n.distance:
good.append(m)
print(len(good))
result = cv2.drawMatches(leftgray, kp1, rightgray, kp2, good, None, flags=2)
cv2.namedWindow('result', 1)
cv2.imshow('result', result)
cv2.waitKey()
src_pts = np.array([kp1[m.queryIdx].pt for m in good])
dst_pts = np.array([kp2[m.trainIdx].pt for m in good])
print(len(src_pts), len(dst_pts))
H = cv2.findHomography(src_pts, dst_pts)
print('H:', H)
h, w = leftgray.shape[:2]
h1, w1 = rightgray.shape[:2]
shft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])
print('shft:', shft)
M = np.dot(shft, H[0])
print('M:', M)
dst_corners = cv2.warpPerspective(leftgray, M, (w * 2, h))
cv2.namedWindow('tiledImg1', cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg1', dst_corners)
cv2.waitKey()
dst_corners[0:h, w:w * 2] = rightgray
cv2.namedWindow('tiledImg', cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg', dst_corners)
cv2.waitKey()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
leftgray = cv2.imread('../image/1.jpg')
rightgray = cv2.imread('../image/2.jpg')
hessian = 500
surf = cv2.xfeatures2d.SURF_create(hessian)
kp1, des1 = surf.detectAndCompute(leftgray, None)
kp2, des2 = surf.detectAndCompute(rightgray, None)
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
a = cv2.drawMatchesKnn(leftgray, kp1, rightgray, kp2, matches, None, flags=2)
cv2.namedWindow('mathches', 1)
cv2.imshow('mathches', a)
cv2.waitKey()
good = []
for m, n in matches:
if m.distance < 0.45 * n.distance:
good.append(m)
print(len(good))
result = cv2.drawMatches(leftgray, kp1, rightgray, kp2, good, None, flags=2)
cv2.namedWindow('result', 1)
cv2.imshow('result', result)
cv2.waitKey()
src_pts = np.array([kp1[m.queryIdx].pt for m in good])
dst_pts = np.array([kp2[m.trainIdx].pt for m in good])
print(len(src_pts), len(dst_pts))
H = cv2.findHomography(src_pts, dst_pts)
print('H:', H)
h, w = leftgray.shape[:2]
h1, w1 = rightgray.shape[:2]
shft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])
print('shft:', shft)
M = np.dot(shft, H[0])
print('M:', M)
dst_corners = cv2.warpPerspective(leftgray, M, (w * 2, h))
cv2.namedWindow('tiledImg1', cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg1', dst_corners)
cv2.waitKey()
dst_corners[0:h, w:w * 2] = rightgray
cv2.namedWindow('tiledImg', cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg', dst_corners)
cv2.waitKey()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
#coding: utf-8
import numpy as np
import cv2
leftgray = cv2.imread('../image/1.jpg')
rightgray = cv2.imread('../image/2.jpg')
hessian=500
surf=cv2.xfeatures2d.SURF_create(hessian) # larger Hessian thresholds yield fewer detected features
kp1,des1=surf.detectAndCompute(leftgray,None) # find keypoints and descriptors
kp2,des2=surf.detectAndCompute(rightgray,None)
FLANN_INDEX_KDTREE=0 # parameters for the FLANN matcher
indexParams=dict(algorithm=FLANN_INDEX_KDTREE,trees=5) # configure the index: 5 KD-trees
searchParams=dict(checks=50) # number of recursive checks
#FlannBasedMatcher: currently the fastest feature matcher (nearest-neighbour search)
flann=cv2.FlannBasedMatcher(indexParams,searchParams) # build the matcher
matches=flann.knnMatch(des1,des2,k=2) # find the matched keypoints
a = cv2.drawMatchesKnn(leftgray, kp1, rightgray, kp2, matches,None, flags=2)
cv2.namedWindow("mathches",1)
cv2.imshow("mathches",a)
cv2.waitKey()
good=[]
# keep only the good matches (Lowe's ratio test)
for m,n in matches:
    if m.distance < 0.45*n.distance: # keep the match if its distance is under 0.45x the second-nearest
good.append(m)
print(len(good))
result = cv2.drawMatches(leftgray, kp1, rightgray, kp2, good,None, flags=2)
cv2.namedWindow("result",1)
cv2.imshow("result",result)
cv2.waitKey()
src_pts = np.array([ kp1[m.queryIdx].pt for m in good]) # descriptor indices from the query image
dst_pts = np.array([ kp2[m.trainIdx].pt for m in good]) # descriptor indices from the train (template) image
print(len(src_pts),len(dst_pts))
H=cv2.findHomography(src_pts,dst_pts) # estimate the homography
print('H:',H)
h,w=leftgray.shape[:2]
h1,w1=rightgray.shape[:2]
shft=np.array([[1.0,0,w],[0,1.0,0],[0,0,1.0]])
print('shft:',shft)
M=np.dot(shft,H[0]) # projective mapping of the left image onto the stitched canvas
print('M:',M)
dst_corners=cv2.warpPerspective(leftgray,M,(w*2 ,h))# perspective warp; the canvas fits both images
cv2.namedWindow("tiledImg1" ,cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg1',dst_corners) # show: the first image is now in its final position
cv2.waitKey()
dst_corners[0:h,w:w*2]=rightgray # place the second image on the right
#cv2.imwrite('tiled.jpg',dst_corners)
cv2.namedWindow("tiledImg" ,cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg',dst_corners)
#cv2.imshow('leftgray',leftgray)
#cv2.imshow('rightgray',rightgray)
cv2.waitKey()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "60953878c377382f1c7f25ce284c9fa12b8eb25f",
"index": 4667,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.namedWindow('mathches', 1)\ncv2.imshow('mathches', a)\ncv2.waitKey()\n<mask token>\nfor m, n in matches:\n if m.distance < 0.45 * n.distance:\n good.append(m)\nprint(len(good))\n<mask token>\ncv2.namedWindow('result', 1)\ncv2.imshow('result', result)\ncv2.waitKey()\n<mask token>\nprint(len(src_pts), len(dst_pts))\n<mask token>\nprint('H:', H)\n<mask token>\nprint('shft:', shft)\n<mask token>\nprint('M:', M)\n<mask token>\ncv2.namedWindow('tiledImg1', cv2.WINDOW_NORMAL)\ncv2.imshow('tiledImg1', dst_corners)\ncv2.waitKey()\n<mask token>\ncv2.namedWindow('tiledImg', cv2.WINDOW_NORMAL)\ncv2.imshow('tiledImg', dst_corners)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nleftgray = cv2.imread('../image/1.jpg')\nrightgray = cv2.imread('../image/2.jpg')\nhessian = 500\nsurf = cv2.xfeatures2d.SURF_create(hessian)\nkp1, des1 = surf.detectAndCompute(leftgray, None)\nkp2, des2 = surf.detectAndCompute(rightgray, None)\nFLANN_INDEX_KDTREE = 0\nindexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\nsearchParams = dict(checks=50)\nflann = cv2.FlannBasedMatcher(indexParams, searchParams)\nmatches = flann.knnMatch(des1, des2, k=2)\na = cv2.drawMatchesKnn(leftgray, kp1, rightgray, kp2, matches, None, flags=2)\ncv2.namedWindow('mathches', 1)\ncv2.imshow('mathches', a)\ncv2.waitKey()\ngood = []\nfor m, n in matches:\n if m.distance < 0.45 * n.distance:\n good.append(m)\nprint(len(good))\nresult = cv2.drawMatches(leftgray, kp1, rightgray, kp2, good, None, flags=2)\ncv2.namedWindow('result', 1)\ncv2.imshow('result', result)\ncv2.waitKey()\nsrc_pts = np.array([kp1[m.queryIdx].pt for m in good])\ndst_pts = np.array([kp2[m.trainIdx].pt for m in good])\nprint(len(src_pts), len(dst_pts))\nH = cv2.findHomography(src_pts, dst_pts)\nprint('H:', H)\nh, w = leftgray.shape[:2]\nh1, w1 = rightgray.shape[:2]\nshft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])\nprint('shft:', shft)\nM = np.dot(shft, H[0])\nprint('M:', M)\ndst_corners = cv2.warpPerspective(leftgray, M, (w * 2, h))\ncv2.namedWindow('tiledImg1', cv2.WINDOW_NORMAL)\ncv2.imshow('tiledImg1', dst_corners)\ncv2.waitKey()\ndst_corners[0:h, w:w * 2] = rightgray\ncv2.namedWindow('tiledImg', cv2.WINDOW_NORMAL)\ncv2.imshow('tiledImg', dst_corners)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nleftgray = cv2.imread('../image/1.jpg')\nrightgray = cv2.imread('../image/2.jpg')\nhessian = 500\nsurf = cv2.xfeatures2d.SURF_create(hessian)\nkp1, des1 = surf.detectAndCompute(leftgray, None)\nkp2, des2 = surf.detectAndCompute(rightgray, None)\nFLANN_INDEX_KDTREE = 0\nindexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\nsearchParams = dict(checks=50)\nflann = cv2.FlannBasedMatcher(indexParams, searchParams)\nmatches = flann.knnMatch(des1, des2, k=2)\na = cv2.drawMatchesKnn(leftgray, kp1, rightgray, kp2, matches, None, flags=2)\ncv2.namedWindow('mathches', 1)\ncv2.imshow('mathches', a)\ncv2.waitKey()\ngood = []\nfor m, n in matches:\n if m.distance < 0.45 * n.distance:\n good.append(m)\nprint(len(good))\nresult = cv2.drawMatches(leftgray, kp1, rightgray, kp2, good, None, flags=2)\ncv2.namedWindow('result', 1)\ncv2.imshow('result', result)\ncv2.waitKey()\nsrc_pts = np.array([kp1[m.queryIdx].pt for m in good])\ndst_pts = np.array([kp2[m.trainIdx].pt for m in good])\nprint(len(src_pts), len(dst_pts))\nH = cv2.findHomography(src_pts, dst_pts)\nprint('H:', H)\nh, w = leftgray.shape[:2]\nh1, w1 = rightgray.shape[:2]\nshft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])\nprint('shft:', shft)\nM = np.dot(shft, H[0])\nprint('M:', M)\ndst_corners = cv2.warpPerspective(leftgray, M, (w * 2, h))\ncv2.namedWindow('tiledImg1', cv2.WINDOW_NORMAL)\ncv2.imshow('tiledImg1', dst_corners)\ncv2.waitKey()\ndst_corners[0:h, w:w * 2] = rightgray\ncv2.namedWindow('tiledImg', cv2.WINDOW_NORMAL)\ncv2.imshow('tiledImg', dst_corners)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-5": "#coding: utf-8\nimport numpy as np\nimport cv2\n\n\nleftgray = cv2.imread('../image/1.jpg')\nrightgray = cv2.imread('../image/2.jpg')\n \nhessian=500\nsurf=cv2.xfeatures2d.SURF_create(hessian) #将Hessian Threshold设置为400,阈值越大能检测的特征就越少\nkp1,des1=surf.detectAndCompute(leftgray,None) #查找关键点和描述符\nkp2,des2=surf.detectAndCompute(rightgray,None)\n \nFLANN_INDEX_KDTREE=0 #建立FLANN匹配器的参数\nindexParams=dict(algorithm=FLANN_INDEX_KDTREE,trees=5) #配置索引,密度树的数量为5\nsearchParams=dict(checks=50) #指定递归次数\n#FlannBasedMatcher:是目前最快的特征匹配算法(最近邻搜索)\nflann=cv2.FlannBasedMatcher(indexParams,searchParams) #建立匹配器\nmatches=flann.knnMatch(des1,des2,k=2) #得出匹配的关键点\n\na = cv2.drawMatchesKnn(leftgray, kp1, rightgray, kp2, matches,None, flags=2)\n\ncv2.namedWindow(\"mathches\",1)\ncv2.imshow(\"mathches\",a)\ncv2.waitKey()\n\ngood=[]\n#提取优秀的特征点\nfor m,n in matches:\n if m.distance < 0.45*n.distance: #如果第一个邻近距离比第二个邻近距离的0.7倍小,则保留\n good.append(m)\nprint(len(good))\nresult = cv2.drawMatches(leftgray, kp1, rightgray, kp2, good,None, flags=2)\ncv2.namedWindow(\"result\",1)\ncv2.imshow(\"result\",result)\ncv2.waitKey()\n\nsrc_pts = np.array([ kp1[m.queryIdx].pt for m in good]) #查询图像的特征描述子索引\ndst_pts = np.array([ kp2[m.trainIdx].pt for m in good]) #训练(模板)图像的特征描述子索引\n\nprint(len(src_pts),len(dst_pts))\nH=cv2.findHomography(src_pts,dst_pts) #生成变换矩阵\n\nprint('H:',H)\n \nh,w=leftgray.shape[:2]\nh1,w1=rightgray.shape[:2]\nshft=np.array([[1.0,0,w],[0,1.0,0],[0,0,1.0]])\nprint('shft:',shft)\nM=np.dot(shft,H[0]) #获取左边图像到右边图像的投影映射关系\nprint('M:',M)\ndst_corners=cv2.warpPerspective(leftgray,M,(w*2 ,h))#透视变换,新图像可容纳完整的两幅图\ncv2.namedWindow(\"tiledImg1\" ,cv2.WINDOW_NORMAL)\ncv2.imshow('tiledImg1',dst_corners) #显示,第一幅图已在标准位置\n\ncv2.waitKey()\ndst_corners[0:h,w:w*2]=rightgray #将第二幅图放在右侧\n#cv2.imwrite('tiled.jpg',dst_corners)\ncv2.namedWindow(\"tiledImg\" ,cv2.WINDOW_NORMAL)\n \ncv2.imshow('tiledImg',dst_corners)\n#cv2.imshow('leftgray',leftgray)\n#cv2.imshow('rightgray',rightgray)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def main():
counts = open(sys.argv[1]).readlines()
for line in open(sys.argv[1]):
line = line.strip('\n')
url = line
try:
r = requests.get(url, verify=True, timeout=3)
print(url + ' ' + str(r.status_code))
print(str(r.text))
if r.status_code == 200 and 'MPEGVideo' in r.text:
result_url.append(url)
except Exception as e:
print(str(e))
for i in result_url:
print(i)
file_200.write(i + '\n')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
counts = open(sys.argv[1]).readlines()
for line in open(sys.argv[1]):
line = line.strip('\n')
url = line
try:
r = requests.get(url, verify=True, timeout=3)
print(url + ' ' + str(r.status_code))
print(str(r.text))
if r.status_code == 200 and 'MPEGVideo' in r.text:
result_url.append(url)
except Exception as e:
print(str(e))
for i in result_url:
print(i)
file_200.write(i + '\n')
if __name__ == '__main__':
file_200 = open('result_uWSGI_file.txt', 'w')
main()
file_200.flush()
file_200.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
result_url = []
def main():
counts = open(sys.argv[1]).readlines()
for line in open(sys.argv[1]):
line = line.strip('\n')
url = line
try:
r = requests.get(url, verify=True, timeout=3)
print(url + ' ' + str(r.status_code))
print(str(r.text))
if r.status_code == 200 and 'MPEGVideo' in r.text:
result_url.append(url)
except Exception as e:
print(str(e))
for i in result_url:
print(i)
file_200.write(i + '\n')
if __name__ == '__main__':
file_200 = open('result_uWSGI_file.txt', 'w')
main()
file_200.flush()
file_200.close()
<|reserved_special_token_1|>
import requests, sys
result_url = []
def main():
counts = open(sys.argv[1]).readlines()
for line in open(sys.argv[1]):
line = line.strip('\n')
url = line
try:
r = requests.get(url, verify=True, timeout=3)
print(url + ' ' + str(r.status_code))
print(str(r.text))
if r.status_code == 200 and 'MPEGVideo' in r.text:
result_url.append(url)
except Exception as e:
print(str(e))
for i in result_url:
print(i)
file_200.write(i + '\n')
if __name__ == '__main__':
file_200 = open('result_uWSGI_file.txt', 'w')
main()
file_200.flush()
file_200.close()
<|reserved_special_token_1|>
#coding=utf-8
import requests,sys
result_url=[]
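# URLs that answered 200 with "MPEGVideo" in the response body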
def main():
counts=open(sys.argv[1]).readlines()
for line in open(sys.argv[1]):
line=line.strip("\n")
url=line
try:
#url="http://s6000.sgcc.com.cn/WebContent/s6000/main/index.jsp#no-back"
r=requests.get(url,verify=True,timeout=3)
print(url+" "+str(r.status_code))
print(str(r.text))
if r.status_code==200 and "MPEGVideo" in r.text:
result_url.append(url)
except Exception as e:
print(str(e))
for i in result_url:
print(i)
file_200.write(i+"\n")
if __name__ == '__main__':
file_200=open("result_uWSGI_file.txt","w")
main()
file_200.flush()
file_200.close()
|
flexible
|
{
"blob_id": "96a4659f03879e051af95b5aa9c1e1364015fb86",
"index": 8723,
"step-1": "<mask token>\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-3": "<mask token>\nresult_url = []\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-4": "import requests, sys\nresult_url = []\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-5": "#coding=utf-8\r\nimport requests,sys\r\nresult_url=[]\r\n\r\ndef main():\r\n counts=open(sys.argv[1]).readlines()\r\n for line in open(sys.argv[1]):\r\n line=line.strip(\"\\n\")\r\n url=line\r\n try:\r\n #url=\"http://s6000.sgcc.com.cn/WebContent/s6000/main/index.jsp#no-back\"\r\n r=requests.get(url,verify=True,timeout=3)\r\n print(url+\" \"+str(r.status_code))\r\n print(str(r.text))\r\n if r.status_code==200 and \"MPEGVideo\" in r.text:\r\n result_url.append(url) \r\n except Exception as e:\r\n print(str(e))\r\n for i in result_url:\r\n print(i)\r\n file_200.write(i+\"\\n\")\r\n\r\nif __name__ == '__main__':\r\n file_200=open(\"result_uWSGI_file.txt\",\"w\") \r\n main()\r\n file_200.flush() \r\n file_200.close() \r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Generated by Django 2.2.3 on 2019-07-14 13:34
from django.db import migrations, models
def forwards_func(apps, schema_editor):
""" Add Theater Rooms """
TheaterRoom = apps.get_model("main", "TheaterRoom")
db_alias = schema_editor.connection.alias
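    # seed two default rooms so the table has data right after the migration runs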
TheaterRoom.objects.using(db_alias).bulk_create([
TheaterRoom(name="Red Room", rows_count=10, seats_per_row_count=15),
TheaterRoom(name="Blue Room", rows_count=20, seats_per_row_count=30),
])
def reverse_func(apps, schema_editor):
""" No need to do anything since the table is dropped completely """
pass
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TheaterRoom',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('rows_count', models.IntegerField()),
('seats_per_row_count', models.IntegerField()),
],
),
migrations.RunPython(forwards_func, reverse_func),
]
|
normal
|
{
"blob_id": "a4b61a5a79e314e56ba25c6e2e735bd2ee4ef0d3",
"index": 4551,
"step-1": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='TheaterRoom', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20\n )), ('rows_count', models.IntegerField()), ('seats_per_row_count',\n models.IntegerField())]), migrations.RunPython(forwards_func,\n reverse_func)]\n",
"step-3": "<mask token>\n\n\ndef forwards_func(apps, schema_editor):\n \"\"\" Add Theater Rooms \"\"\"\n TheaterRoom = apps.get_model('main', 'TheaterRoom')\n db_alias = schema_editor.connection.alias\n TheaterRoom.objects.using(db_alias).bulk_create([TheaterRoom(name=\n 'Red Room', rows_count=10, seats_per_row_count=15), TheaterRoom(\n name='Blue Room', rows_count=20, seats_per_row_count=30)])\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='TheaterRoom', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20\n )), ('rows_count', models.IntegerField()), ('seats_per_row_count',\n models.IntegerField())]), migrations.RunPython(forwards_func,\n reverse_func)]\n",
"step-4": "from django.db import migrations, models\n\n\ndef forwards_func(apps, schema_editor):\n \"\"\" Add Theater Rooms \"\"\"\n TheaterRoom = apps.get_model('main', 'TheaterRoom')\n db_alias = schema_editor.connection.alias\n TheaterRoom.objects.using(db_alias).bulk_create([TheaterRoom(name=\n 'Red Room', rows_count=10, seats_per_row_count=15), TheaterRoom(\n name='Blue Room', rows_count=20, seats_per_row_count=30)])\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='TheaterRoom', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20\n )), ('rows_count', models.IntegerField()), ('seats_per_row_count',\n models.IntegerField())]), migrations.RunPython(forwards_func,\n reverse_func)]\n",
"step-5": "# Generated by Django 2.2.3 on 2019-07-14 13:34\n\nfrom django.db import migrations, models\n\n\ndef forwards_func(apps, schema_editor):\n \"\"\" Add Theater Rooms \"\"\"\n TheaterRoom = apps.get_model(\"main\", \"TheaterRoom\")\n db_alias = schema_editor.connection.alias\n TheaterRoom.objects.using(db_alias).bulk_create([\n TheaterRoom(name=\"Red Room\", rows_count=10, seats_per_row_count=15),\n TheaterRoom(name=\"Blue Room\", rows_count=20, seats_per_row_count=30),\n ])\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='TheaterRoom',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20)),\n ('rows_count', models.IntegerField()),\n ('seats_per_row_count', models.IntegerField()),\n ],\n ),\n migrations.RunPython(forwards_func, reverse_func),\n ]\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
class Line:
def __init__(self,coor1,coor2):
self.coor1 = coor1
self.coor2 = coor2
    def distance(self):
return ((((self.coor2[0]-self.coor1[0])**2)+((self.coor2[1]-self.coor1[1])**2))**0.5)
def slope(self):
return (self.coor2[1]-self.coor1[1])/(self.coor2[0]-self.coor1[0])
def __str__(self):
return f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'
line1 = Line((3,4),(5,6))
li = Line((3,2),(8,10))
print(li.distance())
print(line1.coor1[0])
print(line1.distance())
print(li)
class Cylinder:
pi = 3.14
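    # class attribute shared by every Cylinder instance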
def __init__(self,height=1,radius=1):
self.height = height
self.radius = radius
def volume(self):
return self.pi*self.radius**2*self.height
    def surface_area(self):
        # full surface area: two end caps plus the lateral side
        return 2*self.pi*self.radius*(self.radius + self.height)
def __str__(self):
return f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'
c = Cylinder(2,3)
print(c)
class Account:
def __init__(self,name,balance):
self.name=name
self.balance=balance
def deposit(self,money):
self.balance += money
return 'Deposit accepted'
def withdraw(self,moneytaken):
if self.balance < moneytaken:
return 'Funds Unavailable'
else:
self.balance -= moneytaken
return 'Withdraw Accepted'
def __str__(self):
return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
acct1 = Account('jose',100)
print(acct1)
print(acct1.withdraw(1000))
print(acct1.balance)
print(acct1.deposit(101))
print(acct1.balance)
|
normal
|
{
"blob_id": "f91e997b305348485698d180b97138b040285b60",
"index": 9440,
"step-1": "<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-2": "class Line:\n <mask token>\n <mask token>\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\n<mask token>\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\n<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-3": "class Line:\n\n def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2\n <mask token>\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\n<mask token>\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\n<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-4": "class Line:\n\n def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2\n\n def slope(self):\n pass\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\nline1 = Line((3, 4), (5, 6))\nli = Line((3, 2), (8, 10))\nprint(li.distance())\nprint(line1.coor1[0])\nprint(line1.distance())\nprint(li)\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\nc = Cylinder(2, 3)\nprint(c)\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\nacct1 = Account('jose', 100)\nprint(acct1)\nprint(acct1.withdraw(1000))\nprint(acct1.balance)\nprint(acct1.deposit(101))\nprint(acct1.balance)\n",
"step-5": "class Line:\n def __init__(self,coor1,coor2):\n self.coor1 = coor1\n self.coor2 = coor2 \n \n def slope(self):\n pass\n\n def distance(self):\n #x = self.coor1[0]-self.coor2[0]\n #y = self.coor2[1]-self.coor2[1]\n #return ((x**2)+(y**2))**0.5\n return ((((self.coor2[0]-self.coor1[0])**2)+((self.coor2[1]-self.coor1[1])**2))**0.5)\n\n def slope(self):\n return (self.coor2[1]-self.coor1[1])/(self.coor2[0]-self.coor1[0])\n\n\n\n def __str__(self):\n return f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n\n\nline1 = Line((3,4),(5,6))\nli = Line((3,2),(8,10))\nprint(li.distance())\nprint(line1.coor1[0])\nprint(line1.distance())\nprint(li)\n\nclass Cylinder:\n\n pi = 3.14\n \n def __init__(self,height=1,radius=1):\n self.height = height\n self.radius = radius\n\n\n def volume(self):\n return self.pi*self.radius**2*self.height\n\n def surface_area(self):\n return 2*self.pi*self.radius**2\n\n def __str__(self):\n return f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n\nc = Cylinder(2,3)\n\nprint(c)\n\nclass Account: \n def __init__(self,name,balance):\n self.name=name\n self.balance=balance\n\n def deposit(self,money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self,moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable' \n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n \n \n\n \n\nacct1 = Account('jose',100)\n\n\n\nprint(acct1)\nprint(acct1.withdraw(1000))\nprint(acct1.balance)\nprint(acct1.deposit(101))\nprint(acct1.balance)\n",
"step-ids": [
5,
15,
16,
19,
20
]
}
|
[
5,
15,
16,
19,
20
] |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from meizi.items import MeiziItem
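# CrawlSpider: crawl list pages 1-91, follow each gallery link, and yield the image URLs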
class MztspiderSpider(CrawlSpider):
name = 'mztspider2'
allowed_domains = ['meizitu.com']
start_urls = ['http://www.meizitu.com/a/list_1_%s.html' % urlnum for urlnum in range(1, 92)]
rules = (
Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths='//ul[@class="wp-list clearfix"]/li/div/div/a'),
callback='parse_item', follow=True),
)
def parse_item(self, response):
sel = Selector(response)
srcs = sel.xpath('//div[@id="picture"]/p/img/@src').extract()
item = MeiziItem()
item['image_urls'] = srcs
yield item
|
normal
|
{
"blob_id": "a1ce43c3f64667619c4964bc4dc67215d3ecc1a0",
"index": 9215,
"step-1": "<mask token>\n\n\nclass MztspiderSpider(CrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MztspiderSpider(CrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-3": "<mask token>\n\n\nclass MztspiderSpider(CrawlSpider):\n name = 'mztspider2'\n allowed_domains = ['meizitu.com']\n start_urls = [('http://www.meizitu.com/a/list_1_%s.html' % urlnum) for\n urlnum in range(1, 92)]\n rules = Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths=\n '//ul[@class=\"wp-list clearfix\"]/li/div/div/a'), callback=\n 'parse_item', follow=True),\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-4": "import scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.selector import Selector\nfrom meizi.items import MeiziItem\n\n\nclass MztspiderSpider(CrawlSpider):\n name = 'mztspider2'\n allowed_domains = ['meizitu.com']\n start_urls = [('http://www.meizitu.com/a/list_1_%s.html' % urlnum) for\n urlnum in range(1, 92)]\n rules = Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths=\n '//ul[@class=\"wp-list clearfix\"]/li/div/div/a'), callback=\n 'parse_item', follow=True),\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.selector import Selector\nfrom meizi.items import MeiziItem\n\n\nclass MztspiderSpider(CrawlSpider):\n name = 'mztspider2'\n allowed_domains = ['meizitu.com']\n start_urls = ['http://www.meizitu.com/a/list_1_%s.html' % urlnum for urlnum in range(1, 92)]\n\n rules = (\n Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths='//ul[@class=\"wp-list clearfix\"]/li/div/div/a'),\n callback='parse_item', follow=True),\n )\n\n def parse_item(self, response):\n sel = Selector(response)\n srcs = sel.xpath('//div[@id=\"picture\"]/p/img/@src').extract()\n item = MeiziItem()\n item['image_urls'] = srcs\n yield item\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def blink_connected_to_wifi(pin=23):
_blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])
<|reserved_special_token_0|>
def _blink_pattern(pin, pattern):
p = Pin(pin, Pin.OUT)
try:
for item in pattern:
for j in range(item[0]):
p.value(1)
time.sleep(item[1])
p.value(0)
time.sleep(item[2])
except:
p.value(0)
Pin(pin, Pin.IN)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def blink_connected_to_wifi(pin=23):
_blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])
def blink_not_connected_to_wifi(pin=23):
_blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1,
0.5, 0.5]])
def _blink_pattern(pin, pattern):
p = Pin(pin, Pin.OUT)
try:
for item in pattern:
for j in range(item[0]):
p.value(1)
time.sleep(item[1])
p.value(0)
time.sleep(item[2])
except:
p.value(0)
Pin(pin, Pin.IN)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('LOAD: blinker.py')
def blink_connected_to_wifi(pin=23):
_blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])
def blink_not_connected_to_wifi(pin=23):
_blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1,
0.5, 0.5]])
def _blink_pattern(pin, pattern):
p = Pin(pin, Pin.OUT)
try:
for item in pattern:
for j in range(item[0]):
p.value(1)
time.sleep(item[1])
p.value(0)
time.sleep(item[2])
except:
p.value(0)
Pin(pin, Pin.IN)
<|reserved_special_token_1|>
import sys, time
from machine import Pin
print('LOAD: blinker.py')
def blink_connected_to_wifi(pin=23):
_blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])
def blink_not_connected_to_wifi(pin=23):
_blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1,
0.5, 0.5]])
def _blink_pattern(pin, pattern):
p = Pin(pin, Pin.OUT)
try:
for item in pattern:
for j in range(item[0]):
p.value(1)
time.sleep(item[1])
p.value(0)
time.sleep(item[2])
except:
p.value(0)
Pin(pin, Pin.IN)
<|reserved_special_token_1|>
import sys, time
from machine import Pin
print('LOAD: blinker.py')
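# Blink an LED on the given pin (default 23) to signal Wi-Fi status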
def blink_connected_to_wifi(pin=23):
_blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])
def blink_not_connected_to_wifi(pin=23):
_blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, 0.5, 0.5]])
# pin - the pin connected to the LED
# pattern - a list of items: [blink_count, on-period, off-period]
def _blink_pattern(pin, pattern):
p = Pin(pin, Pin.OUT)
try:
for item in pattern:
for j in range(item[0]):
p.value(1)
time.sleep(item[1])
p.value(0)
time.sleep(item[2])
except:
p.value(0)
Pin(pin, Pin.IN)
|
flexible
|
{
"blob_id": "c0bd060990d00ab50c9f2d3060b7f975ff16e1ab",
"index": 4105,
"step-1": "<mask token>\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\n<mask token>\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n",
"step-2": "<mask token>\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, \n 0.5, 0.5]])\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n",
"step-3": "<mask token>\nprint('LOAD: blinker.py')\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, \n 0.5, 0.5]])\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n",
"step-4": "import sys, time\nfrom machine import Pin\nprint('LOAD: blinker.py')\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, \n 0.5, 0.5]])\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n",
"step-5": "import sys, time\nfrom machine import Pin\n\nprint('LOAD: blinker.py')\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, 0.5, 0.5]])\n\n# pin - the pin, connected to LED\n# pattern - the array of items: [blink_count, on-period, off-period]\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Rectangle():
def __init__(self,length,breadth):
self.length=length
self.breadth=breadth
def area(self):
return(self.length*self.breadth)
def perimeter(self):
return(2*(self.length+self.breadth))
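# Create two rectangles and report which one has the larger area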
r1=Rectangle(4,5)
r2=Rectangle(5,7)
a1=r1.area()
a2=r2.area()
p1=r1.perimeter()
p2=r2.perimeter()
print("the area of rectangle 1 :",a1)
print("the perimeter of rectangle 1:",p1)
print("the area of rectangle 2:",a2)
print("the perimeter of rectangle 2:",p2)
if(a1>a2):
print("rectangle 1 is bigger")
else:
print("rectangle 2 is bigger")
|
normal
|
{
"blob_id": "d5691403812cd3742f8e8b74d4ca613eca784ffd",
"index": 9677,
"step-1": "class Rectangle:\n <mask token>\n <mask token>\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\n<mask token>\n",
"step-2": "class Rectangle:\n\n def __init__(self, length, breadth):\n self.length = length\n self.breadth = breadth\n <mask token>\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\n<mask token>\n",
"step-3": "class Rectangle:\n\n def __init__(self, length, breadth):\n self.length = length\n self.breadth = breadth\n\n def area(self):\n return self.length * self.breadth\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\n<mask token>\n",
"step-4": "class Rectangle:\n\n def __init__(self, length, breadth):\n self.length = length\n self.breadth = breadth\n\n def area(self):\n return self.length * self.breadth\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\nr1 = Rectangle(4, 5)\nr2 = Rectangle(5, 7)\na1 = r1.area()\na2 = r2.area()\np1 = r1.perimeter()\np2 = r2.perimeter()\nprint('the area of rectangle 1 :', a1)\nprint('the perimeter of rectangle 1:', p1)\nprint('the area of rectangle 2:', a2)\nprint('the perimeter of rectangle 2:', p2)\nif a1 > a2:\n print('rectangle 1 is bigger')\nelse:\n print('rectangle 2 is bigger')\n",
"step-5": "class Rectangle():\n def __init__(self,length,breadth):\n self.length=length\n self.breadth=breadth\n def area(self):\n return(self.length*self.breadth)\n def perimeter(self):\n return(2*(self.length+self.breadth))\nr1=Rectangle(4,5)\nr2=Rectangle(5,7)\na1=r1.area()\na2=r2.area()\np1=r1.perimeter()\np2=r2.perimeter()\nprint(\"the area of rectangle 1 :\",a1)\nprint(\"the perimeter of rectangle 1:\",p1)\nprint(\"the area of rectangle 2:\",a2)\nprint(\"the perimeter of rectangle 2:\",p2)\nif(a1>a2):\n print(\"rectangle 1 is bigger\")\nelse:\n print(\"rectangle 2 is bigger\")",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
assert os.path.isdir(os.environ['OUT_DIR'])
sys.exit(subprocess.call(sys.argv[1:], env=os.environ))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ['OUT_DIR'] = os.path.abspath('.')
assert os.path.isdir(os.environ['OUT_DIR'])
sys.exit(subprocess.call(sys.argv[1:], env=os.environ))
<|reserved_special_token_1|>
import subprocess
import sys
import os
os.environ['OUT_DIR'] = os.path.abspath('.')
assert os.path.isdir(os.environ['OUT_DIR'])
sys.exit(subprocess.call(sys.argv[1:], env=os.environ))
<|reserved_special_token_1|>
#!/usr/bin/env python
# This file just executes its arguments, except that also adds OUT_DIR to the
# environ. This is for compatibility with cargo.
import subprocess
import sys
import os
os.environ["OUT_DIR"] = os.path.abspath(".")
assert os.path.isdir(os.environ["OUT_DIR"])
sys.exit(subprocess.call(sys.argv[1:], env=os.environ))
|
flexible
|
{
"blob_id": "be238268b9fdd565f3cb0770839789b702940ef9",
"index": 8248,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nassert os.path.isdir(os.environ['OUT_DIR'])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-3": "<mask token>\nos.environ['OUT_DIR'] = os.path.abspath('.')\nassert os.path.isdir(os.environ['OUT_DIR'])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-4": "import subprocess\nimport sys\nimport os\nos.environ['OUT_DIR'] = os.path.abspath('.')\nassert os.path.isdir(os.environ['OUT_DIR'])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-5": "#!/usr/bin/env python\n# This file just executes its arguments, except that also adds OUT_DIR to the\n# environ. This is for compatibility with cargo.\nimport subprocess\nimport sys\nimport os\n\nos.environ[\"OUT_DIR\"] = os.path.abspath(\".\")\nassert os.path.isdir(os.environ[\"OUT_DIR\"])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
<|reserved_special_token_0|>
TOKENS_PATH = '/tmp/twitter-tokens.json'
REDIRECT_TO = ''
FLASK_SECRET = 'S$2[ShC-=BKKOQ.Z-|fa 6f;,5 <[QngmG)}5,s%0vX>B}?o-0X9PM;.dN{jo7'
<|reserved_special_token_1|>
'''
Twitter settings
Input your app credentials below
https://apps.twitter.com
'''
# consumer key
CONSUMER_KEY = ''
# consumer secret
CONSUMER_SECRET = ''
'''
App settings
'''
# Where to save tokens (JSON)
TOKENS_PATH = '/tmp/twitter-tokens.json'
# Redirect-back to URL after authenticated (optional)
REDIRECT_TO = ''
# secret key for flask
FLASK_SECRET = 'S$2[ShC-=BKKOQ.Z-|fa 6f;,5 <[QngmG)}5,s%0vX>B}?o-0X9PM;.dN{jo7'
|
flexible
|
{
"blob_id": "9cc64edc81ab39b0ab2cd47661c9809545b03ac6",
"index": 3230,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\n<mask token>\nTOKENS_PATH = '/tmp/twitter-tokens.json'\nREDIRECT_TO = ''\nFLASK_SECRET = 'S$2[ShC-=BKKOQ.Z-|fa 6f;,5 <[QngmG)}5,s%0vX>B}?o-0X9PM;.dN{jo7'\n",
"step-3": "'''\nTwitter settings\nInput your app credentials below\nhttps://apps.twitter.com\n'''\n\n# consumer key\nCONSUMER_KEY = ''\n# consumer secret\nCONSUMER_SECRET = ''\n\n'''\nApp settings\n'''\n# Where to save tokens (JSON)\nTOKENS_PATH = '/tmp/twitter-tokens.json'\n\n# Redirect-back to URL after authenticated (optional)\nREDIRECT_TO = ''\n\n# secret key for flask\nFLASK_SECRET = 'S$2[ShC-=BKKOQ.Z-|fa 6f;,5 <[QngmG)}5,s%0vX>B}?o-0X9PM;.dN{jo7'",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
ret, frame = cap.read()
frame = np.float32(frame) / 255
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack / frames
cv2.imshow('frame', np.uint8(average_stack * 255))
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
average_stack = np.float32(np.copy(frame)) / 255
frames = 1.0
while True:
ret, frame = cap.read()
frame = np.float32(frame) / 255
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack / frames
cv2.imshow('frame', np.uint8(average_stack * 255))
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
average_stack = np.float32(np.copy(frame)) / 255
frames = 1.0
while True:
ret, frame = cap.read()
frame = np.float32(frame) / 255
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack / frames
cv2.imshow('frame', np.uint8(average_stack * 255))
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
average_stack = np.float32(np.copy(frame))/255
frames = 1.0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = np.float32(frame)/255
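    # running mean update: new_mean = (old_mean * n + frame) / (n + 1)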
average_stack = average_stack * frames + frame
frames += 1.0
average_stack = average_stack/frames
# Display the resulting frame
cv2.imshow('frame',np.uint8(average_stack*255))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "7fd89272d3d3584f35fd8f552cb7b14e57b7ed1b",
"index": 1591,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n ret, frame = cap.read()\n frame = np.float32(frame) / 255\n average_stack = average_stack * frames + frame\n frames += 1.0\n average_stack = average_stack / frames\n cv2.imshow('frame', np.uint8(average_stack * 255))\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\ncap = cv2.VideoCapture(0)\nret, frame = cap.read()\naverage_stack = np.float32(np.copy(frame)) / 255\nframes = 1.0\nwhile True:\n ret, frame = cap.read()\n frame = np.float32(frame) / 255\n average_stack = average_stack * frames + frame\n frames += 1.0\n average_stack = average_stack / frames\n cv2.imshow('frame', np.uint8(average_stack * 255))\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nimport time\ncap = cv2.VideoCapture(0)\nret, frame = cap.read()\naverage_stack = np.float32(np.copy(frame)) / 255\nframes = 1.0\nwhile True:\n ret, frame = cap.read()\n frame = np.float32(frame) / 255\n average_stack = average_stack * frames + frame\n frames += 1.0\n average_stack = average_stack / frames\n cv2.imshow('frame', np.uint8(average_stack * 255))\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-5": "import numpy as np\nimport cv2\nimport time\n\ncap = cv2.VideoCapture(0)\nret, frame = cap.read()\naverage_stack = np.float32(np.copy(frame))/255\nframes = 1.0\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n frame = np.float32(frame)/255\n\n average_stack = average_stack * frames + frame\n frames += 1.0\n average_stack = average_stack/frames\n\n # Display the resulting frame\n cv2.imshow('frame',np.uint8(average_stack*255))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
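# PETSc configure driver: Intel compilers with MKL (BLAS/LAPACK, Pardiso) and a downloaded MPICH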
if __name__ == '__main__':
import sys
import os
sys.path.insert(0, os.path.abspath('config'))
import configure
configure_options = [
'CC=icc',
'CXX=icpc',
'FC=ifort',
'--with-blas-lapack-dir=/soft/com/packages/intel/13/update5/mkl/',
'--with-mkl_pardiso-dir=/soft/com/packages/intel/13/update5/mkl/',
'--download-mpich=1',
]
configure.petsc_configure(configure_options)
|
normal
|
{
"blob_id": "43eb221758ebcf1f01851fc6cda67b72f32a73c7",
"index": 6992,
"step-1": "<mask token>\n",
"step-2": "if __name__ == '__main__':\n import sys\n import os\n sys.path.insert(0, os.path.abspath('config'))\n import configure\n configure_options = ['CC=icc', 'CXX=icpc', 'FC=ifort',\n '--with-blas-lapack-dir=/soft/com/packages/intel/13/update5/mkl/',\n '--with-mkl_pardiso-dir=/soft/com/packages/intel/13/update5/mkl/',\n '--download-mpich=1']\n configure.petsc_configure(configure_options)\n",
"step-3": "#!/usr/bin/python\nif __name__ == '__main__':\n import sys\n import os\n sys.path.insert(0, os.path.abspath('config'))\n import configure\n configure_options = [\n 'CC=icc',\n 'CXX=icpc',\n 'FC=ifort',\n '--with-blas-lapack-dir=/soft/com/packages/intel/13/update5/mkl/',\n '--with-mkl_pardiso-dir=/soft/com/packages/intel/13/update5/mkl/',\n '--download-mpich=1',\n ]\n configure.petsc_configure(configure_options)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from dependency_injector import containers, providers
from src.repositories import MemcachedRepository
from src.services import FibonacciService
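# DI container: a single shared Memcached client (Singleton), a fresh FibonacciService per injection (Factory)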
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
cache_repository = providers.Singleton(MemcachedRepository,
host=config.host,
port=config.port)
fibonacci_service = providers.Factory(
FibonacciService,
cache_repository=cache_repository,
)
|
normal
|
{
"blob_id": "e8ba1ae98b247eaf90d83339e5fdc27287a70c73",
"index": 2561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Container(containers.DeclarativeContainer):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Container(containers.DeclarativeContainer):\n config = providers.Configuration()\n cache_repository = providers.Singleton(MemcachedRepository, host=config\n .host, port=config.port)\n fibonacci_service = providers.Factory(FibonacciService,\n cache_repository=cache_repository)\n",
"step-4": "from dependency_injector import containers, providers\nfrom src.repositories import MemcachedRepository\nfrom src.services import FibonacciService\n\n\nclass Container(containers.DeclarativeContainer):\n config = providers.Configuration()\n cache_repository = providers.Singleton(MemcachedRepository, host=config\n .host, port=config.port)\n fibonacci_service = providers.Factory(FibonacciService,\n cache_repository=cache_repository)\n",
"step-5": "from dependency_injector import containers, providers\n\nfrom src.repositories import MemcachedRepository\nfrom src.services import FibonacciService\n\n\nclass Container(containers.DeclarativeContainer):\n config = providers.Configuration()\n\n cache_repository = providers.Singleton(MemcachedRepository,\n host=config.host,\n port=config.port)\n\n fibonacci_service = providers.Factory(\n FibonacciService,\n cache_repository=cache_repository,\n )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import json
base_url = f"https://api.telegram.org/bot"
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js
def get_updates(TOKEN):
url = f'{base_url}{TOKEN}/getUpdates'
js = get_json_from_url(url)
if not js['ok']:
print('Error: Invalid telegram token!')
exit(0)
return js
def get_last_chat_id_and_text(updates):
num_updates = len(updates["result"])
if num_updates == 0:
print('Error: Please send a message to the bot to initialize!')
exit(0)
last_update = num_updates - 1
text = updates["result"][last_update]["message"]["text"]
msg_timestamp = updates["result"][last_update]["message"]["date"]
chat_id = updates["result"][last_update]["message"]["chat"]["id"]
return text, msg_timestamp, chat_id
def send_message(TOKEN, text, chat_id):
url = f'{base_url}{TOKEN}/sendMessage?text={text}&chat_id={chat_id}'
resp = json.loads(get_url(url))
if not resp['ok']:
print('Error: Invalid telegram chat_id! Please delete cached.')
exit(0)
def initialize_bot(TOKEN):
get_updates(TOKEN) # ensure token is valid
# in case the bot doesn't have a recent incoming message, cached will prevent failing
try:
f_cached = open('cached', 'rt')
except FileNotFoundError:
_, _, chat_id = get_last_chat_id_and_text(get_updates(TOKEN))
with open('cached', 'wt') as f_cached:
json.dump({'chat_id': chat_id}, f_cached)
else:
chat_id = json.load(f_cached)['chat_id']
send_message(TOKEN, 'Bot initialized.', chat_id) # ensure chat_id is valid
return chat_id
|
normal
|
{
"blob_id": "501614f9c7df3c862c9951ea343964b6ed47e74a",
"index": 3204,
"step-1": "<mask token>\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode('utf8')\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(TOKEN):\n url = f'{base_url}{TOKEN}/getUpdates'\n js = get_json_from_url(url)\n if not js['ok']:\n print('Error: Invalid telegram token!')\n exit(0)\n return js\n\n\n<mask token>\n\n\ndef initialize_bot(TOKEN):\n get_updates(TOKEN)\n try:\n f_cached = open('cached', 'rt')\n except FileNotFoundError:\n _, _, chat_id = get_last_chat_id_and_text(get_updates(TOKEN))\n with open('cached', 'wt') as f_cached:\n json.dump({'chat_id': chat_id}, f_cached)\n else:\n chat_id = json.load(f_cached)['chat_id']\n send_message(TOKEN, 'Bot initialized.', chat_id)\n return chat_id\n",
"step-2": "<mask token>\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode('utf8')\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(TOKEN):\n url = f'{base_url}{TOKEN}/getUpdates'\n js = get_json_from_url(url)\n if not js['ok']:\n print('Error: Invalid telegram token!')\n exit(0)\n return js\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates['result'])\n if num_updates == 0:\n print('Error: Please send a message to the bot to initialize!')\n exit(0)\n last_update = num_updates - 1\n text = updates['result'][last_update]['message']['text']\n msg_timestamp = updates['result'][last_update]['message']['date']\n chat_id = updates['result'][last_update]['message']['chat']['id']\n return text, msg_timestamp, chat_id\n\n\ndef send_message(TOKEN, text, chat_id):\n url = f'{base_url}{TOKEN}/sendMessage?text={text}&chat_id={chat_id}'\n resp = json.loads(get_url(url))\n if not resp['ok']:\n print('Error: Invalid telegram chat_id! Please delete cached.')\n exit(0)\n\n\ndef initialize_bot(TOKEN):\n get_updates(TOKEN)\n try:\n f_cached = open('cached', 'rt')\n except FileNotFoundError:\n _, _, chat_id = get_last_chat_id_and_text(get_updates(TOKEN))\n with open('cached', 'wt') as f_cached:\n json.dump({'chat_id': chat_id}, f_cached)\n else:\n chat_id = json.load(f_cached)['chat_id']\n send_message(TOKEN, 'Bot initialized.', chat_id)\n return chat_id\n",
"step-3": "<mask token>\nbase_url = f'https://api.telegram.org/bot'\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode('utf8')\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(TOKEN):\n url = f'{base_url}{TOKEN}/getUpdates'\n js = get_json_from_url(url)\n if not js['ok']:\n print('Error: Invalid telegram token!')\n exit(0)\n return js\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates['result'])\n if num_updates == 0:\n print('Error: Please send a message to the bot to initialize!')\n exit(0)\n last_update = num_updates - 1\n text = updates['result'][last_update]['message']['text']\n msg_timestamp = updates['result'][last_update]['message']['date']\n chat_id = updates['result'][last_update]['message']['chat']['id']\n return text, msg_timestamp, chat_id\n\n\ndef send_message(TOKEN, text, chat_id):\n url = f'{base_url}{TOKEN}/sendMessage?text={text}&chat_id={chat_id}'\n resp = json.loads(get_url(url))\n if not resp['ok']:\n print('Error: Invalid telegram chat_id! Please delete cached.')\n exit(0)\n\n\ndef initialize_bot(TOKEN):\n get_updates(TOKEN)\n try:\n f_cached = open('cached', 'rt')\n except FileNotFoundError:\n _, _, chat_id = get_last_chat_id_and_text(get_updates(TOKEN))\n with open('cached', 'wt') as f_cached:\n json.dump({'chat_id': chat_id}, f_cached)\n else:\n chat_id = json.load(f_cached)['chat_id']\n send_message(TOKEN, 'Bot initialized.', chat_id)\n return chat_id\n",
"step-4": "import requests\nimport json\nbase_url = f'https://api.telegram.org/bot'\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode('utf8')\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(TOKEN):\n url = f'{base_url}{TOKEN}/getUpdates'\n js = get_json_from_url(url)\n if not js['ok']:\n print('Error: Invalid telegram token!')\n exit(0)\n return js\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates['result'])\n if num_updates == 0:\n print('Error: Please send a message to the bot to initialize!')\n exit(0)\n last_update = num_updates - 1\n text = updates['result'][last_update]['message']['text']\n msg_timestamp = updates['result'][last_update]['message']['date']\n chat_id = updates['result'][last_update]['message']['chat']['id']\n return text, msg_timestamp, chat_id\n\n\ndef send_message(TOKEN, text, chat_id):\n url = f'{base_url}{TOKEN}/sendMessage?text={text}&chat_id={chat_id}'\n resp = json.loads(get_url(url))\n if not resp['ok']:\n print('Error: Invalid telegram chat_id! Please delete cached.')\n exit(0)\n\n\ndef initialize_bot(TOKEN):\n get_updates(TOKEN)\n try:\n f_cached = open('cached', 'rt')\n except FileNotFoundError:\n _, _, chat_id = get_last_chat_id_and_text(get_updates(TOKEN))\n with open('cached', 'wt') as f_cached:\n json.dump({'chat_id': chat_id}, f_cached)\n else:\n chat_id = json.load(f_cached)['chat_id']\n send_message(TOKEN, 'Bot initialized.', chat_id)\n return chat_id\n",
"step-5": "import requests\nimport json\n\nbase_url = f\"https://api.telegram.org/bot\"\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode(\"utf8\")\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(TOKEN):\n url = f'{base_url}{TOKEN}/getUpdates'\n js = get_json_from_url(url)\n if not js['ok']:\n print('Error: Invalid telegram token!')\n exit(0)\n return js\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates[\"result\"])\n if num_updates == 0:\n print('Error: Please send a message to the bot to initialize!')\n exit(0)\n\n last_update = num_updates - 1\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n msg_timestamp = updates[\"result\"][last_update][\"message\"][\"date\"]\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n\n return text, msg_timestamp, chat_id\n\n\ndef send_message(TOKEN, text, chat_id):\n url = f'{base_url}{TOKEN}/sendMessage?text={text}&chat_id={chat_id}'\n resp = json.loads(get_url(url))\n if not resp['ok']:\n print('Error: Invalid telegram chat_id! Please delete cached.')\n exit(0)\n\n\ndef initialize_bot(TOKEN):\n\n get_updates(TOKEN) # ensure token is valid\n\n # in case the bot doesn't have a recent incoming message, cached will prevent failing\n try:\n f_cached = open('cached', 'rt')\n except FileNotFoundError:\n _, _, chat_id = get_last_chat_id_and_text(get_updates(TOKEN))\n with open('cached', 'wt') as f_cached:\n json.dump({'chat_id': chat_id}, f_cached)\n else:\n chat_id = json.load(f_cached)['chat_id']\n\n send_message(TOKEN, 'Bot initialized.', chat_id) # ensure chat_id is valid\n\n return chat_id\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class User:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Customer(User):
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
self.balance = 0
def getBalance(self, balance):
self.balance = balance
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User:
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Customer(User):
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
self.balance = 0
def getBalance(self, balance):
self.balance = balance
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User:
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
def getUserInfo(self):
user_data = 'select * from students;'
connect.execute(user_data)
data = connect.fetchall()
i = 0
for new_data in data:
print(data)
i += 1
def IncreaseAge(self):
self.age += 1
class Customer(User):
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
self.balance = 0
def getBalance(self, balance):
self.balance = balance
<|reserved_special_token_0|>
brad.getUserInfo()
<|reserved_special_token_1|>
import pymysql
conn = pymysql.connect(host='127.0.0.1', user='root', password='', db='Python')
connect = conn.cursor()
class User:
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
def getUserInfo(self):
user_data = 'select * from students;'
connect.execute(user_data)
data = connect.fetchall()
i = 0
for new_data in data:
print(data)
i += 1
def IncreaseAge(self):
self.age += 1
class Customer(User):
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
self.balance = 0
def getBalance(self, balance):
self.balance = balance
brad = User('Kaushal Patel', 'kaushalpatel089@gmail.com', 22)
customer = Customer('Babulal Kumawat', 'babubhai@gmail.com', 22)
brad.getUserInfo()
<|reserved_special_token_1|>
# A class is like a blueprint for creating objects. An object has properties and methods (functions) associated with it. Almost everything in Python is an object.
# import connect
# from connect import connect
#create class
import pymysql
# import MySQLdb
conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')
connect = conn.cursor()
class User():
#constructor
def __init__(self, name,email,age):
self.name = name
self.email = email
self.age = age
def getUserInfo(self):
# return f'His name is {self.name} and his age is {self.age}'
# conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')
# connect = conn.cursor()
user_data = 'select * from students;'
connect.execute(user_data)
data = connect.fetchall()
i=0
for new_data in data:
# print(f'student name is {data[i][1]} {data[i][2]} and age is {data[i][3]}')
print(data)
i += 1
# return connect.fetchall()
def IncreaseAge(self):
self.age += 1
class Customer(User):
#constructor
def __init__(self, name,email,age):
self.name = name
self.email = email
self.age = age
self.balance = 0
def getBalance(self,balance):
self.balance = balance
#Init User object
brad = User('Kaushal Patel','kaushalpatel089@gmail.com',22)
customer = Customer('Babulal Kumawat','babubhai@gmail.com',22)
# brad.IncreaseAge()
# customer.getBalance(22)
# print(customer.getUserInfo())
# print(brad.getUserInfo())
# print(brad.getUserInfo())
# brad.getUserInfo()
brad.getUserInfo()
|
flexible
|
{
"blob_id": "ea045d04b40341f34c780dceab1f21df93b7207a",
"index": 7689,
"step-1": "<mask token>\n\n\nclass User:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User:\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n <mask token>\n <mask token>\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass User:\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n\n def getUserInfo(self):\n user_data = 'select * from students;'\n connect.execute(user_data)\n data = connect.fetchall()\n i = 0\n for new_data in data:\n print(data)\n i += 1\n\n def IncreaseAge(self):\n self.age += 1\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\n<mask token>\nbrad.getUserInfo()\n",
"step-4": "import pymysql\nconn = pymysql.connect(host='127.0.0.1', user='root', password='', db='Python')\nconnect = conn.cursor()\n\n\nclass User:\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n\n def getUserInfo(self):\n user_data = 'select * from students;'\n connect.execute(user_data)\n data = connect.fetchall()\n i = 0\n for new_data in data:\n print(data)\n i += 1\n\n def IncreaseAge(self):\n self.age += 1\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\nbrad = User('Kaushal Patel', 'kaushalpatel089@gmail.com', 22)\ncustomer = Customer('Babulal Kumawat', 'babubhai@gmail.com', 22)\nbrad.getUserInfo()\n",
"step-5": "# A class is like a blueprint for creating objects. An object has properties and methods(functions) associated with it. Almost everything in Python is an object\n# import connect \n# from connect import connect\n#create class\n\nimport pymysql\n# import MySQLdb\nconn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')\nconnect = conn.cursor()\nclass User():\n #constructor\n def __init__(self, name,email,age):\n self.name = name\n self.email = email\n self.age = age\n\n def getUserInfo(self):\n # return f'His name is {self.name} and his age is {self.age}'\n # conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')\n # connect = conn.cursor()\n user_data = 'select * from students;'\n connect.execute(user_data)\n data = connect.fetchall()\n i=0\n for new_data in data:\n # print(f'student name is {data[i][1]} {data[i][2]} and age is {data[i][3]}')\n print(data)\n i += 1\n # return connect.fetchall()\n\n def IncreaseAge(self):\n self.age += 1\n\n\nclass Customer(User):\n #constructor\n def __init__(self, name,email,age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self,balance):\n self.balance = balance\n\n \n \n#Init User object\nbrad = User('Kaushal Patel','kaushalpatel089@gmail.com',22)\n\ncustomer = Customer('Babulal Kumawat','babubhai@gmail.com',22)\n# brad.IncreaseAge()\n\n# customer.getBalance(22)\n# print(customer.getUserInfo())\n# print(brad.getUserInfo())\n\n# print(brad.getUserInfo())\n# brad.getUserInfo()\nbrad.getUserInfo()",
"step-ids": [
4,
5,
8,
10,
11
]
}
|
[
4,
5,
8,
10,
11
] |
<|reserved_special_token_0|>
def smart_open(file, mode='rt', encoding='utf-8'):
"""Convenience function for reading compressed or plain text files.
:param file: The file to read.
:param mode: The file mode (read, write).
:param encoding: The file encoding.
"""
if file.endswith('.gz'):
return gzip.open(file, mode=mode, encoding=encoding, newline='\n')
return open(file, mode=mode, encoding=encoding, newline='\n')
def my_log(num):
"""
Floors the log function
:param num: the number
:return: log(num) floored to a very low number
"""
if num == 0.0:
return -9999999999
return math.log(num)
def bleu_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the signature
"""
abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':
'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}
signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.
smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}
if args.tokenize == 'ja-mecab':
signature['tok'] += '-' + TokenizeMeCab().signature()
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,
signature[x]) for x in sorted(signature.keys())])
return sigstr
def chrf_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the chrF signature
"""
abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':
'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}
signature = {'version': VERSION, 'space': args.chrf_whitespace,
'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if
args.lc else 'mixed'}
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,
signature[x]) for x in sorted(signature.keys())])
return sigstr
<|reserved_special_token_0|>
def extract_char_ngrams(s: str, n: int) ->Counter:
"""
Yields counts of character n-grams from string s of order n.
"""
return Counter([s[i:i + n] for i in range(len(s) - n + 1)])
def ref_stats(output, refs):
ngrams = Counter()
closest_diff = None
closest_len = None
for ref in refs:
tokens = ref.split()
reflen = len(tokens)
diff = abs(len(output.split()) - reflen)
if closest_diff is None or diff < closest_diff:
closest_diff = diff
closest_len = reflen
elif diff == closest_diff:
if reflen < closest_len:
closest_len = reflen
ngrams_ref = extract_ngrams(ref)
for ngram in ngrams_ref.keys():
ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])
return ngrams, closest_diff, closest_len
def _clean(s):
"""
Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
:param s: The string.
:return: A cleaned-up string.
"""
return re.sub('\\s+', ' ', s.strip())
def process_to_text(rawfile, txtfile, field: int=None):
"""Processes raw files to plain text files.
:param rawfile: the input file (possibly SGML)
:param txtfile: the plaintext file
:param field: For TSV files, which field to extract.
"""
if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:
sacrelogger.info('Processing %s to %s', rawfile, txtfile)
if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\1',
line)), file=fout)
elif rawfile.endswith('.xml'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\1',
line)), file=fout)
elif rawfile.endswith('.txt'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip(), file=fout)
elif rawfile.endswith('.tsv'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip().split('\t')[field], file=fout)
def print_test_set(test_set, langpair, side, origlang=None, subset=None):
"""Prints to STDOUT the specified side of the specified test set
:param test_set: the test set to print
:param langpair: the language pair
:param side: 'src' for source, 'ref' for reference
:param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
:param subset: print only sentences whose document annotation matches a given regex
"""
files = download_test_set(test_set, langpair)
if side == 'src':
files = [files[0]]
elif side == 'ref':
files.pop(0)
streams = [smart_open(file) for file in files]
streams = _filter_subset(streams, test_set, langpair, origlang, subset)
for lines in zip(*streams):
print('\t'.join(map(lambda x: x.rstrip(), lines)))
<|reserved_special_token_0|>
class Result:
def __init__(self, score: float):
self.score = score
def __str__(self):
return self.format()
class BLEU(Result):
def __init__(self, score: float, counts, totals, precisions, bp,
sys_len, ref_len):
super().__init__(score)
self.counts = counts
self.totals = totals
self.precisions = precisions
self.bp = bp
self.sys_len = sys_len
self.ref_len = ref_len
def format(self, width=2):
precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])
return (
'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'
.format(score=self.score, width=width, precisions=precisions,
bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.
sys_len, ref_len=self.ref_len))
class CHRF(Result):
def __init__(self, score: float):
super().__init__(score)
def format(self, width=2):
return '{score:.{width}f}'.format(score=self.score, width=width)
<|reserved_special_token_0|>
def corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[
str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,
force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,
use_effective_order=False) ->BLEU:
"""Produces BLEU scores along with its sufficient statistics from a source against one or more references.
:param sys_stream: The system stream (a sequence of segments)
:param ref_streams: A list of one or more reference streams (each a sequence of segments)
    :param smooth_method: The smoothing method to use
:param smooth_value: For 'floor' smoothing, the floor to use
:param force: Ignore data that looks already tokenized
:param lowercase: Lowercase the data
:param tokenize: The tokenizer to use
:return: a BLEU object containing everything you'd want
"""
if isinstance(sys_stream, str):
sys_stream = [sys_stream]
if isinstance(ref_streams, str):
ref_streams = [[ref_streams]]
sys_len = 0
ref_len = 0
correct = [(0) for n in range(NGRAM_ORDER)]
total = [(0) for n in range(NGRAM_ORDER)]
tokenized_count = 0
fhs = [sys_stream] + ref_streams
for lines in zip_longest(*fhs):
if None in lines:
raise EOFError(
'Source and reference streams have different lengths!')
if lowercase:
lines = [x.lower() for x in lines]
if not (force or tokenize == 'none') and lines[0].rstrip().endswith(
' .'):
tokenized_count += 1
if tokenized_count == 100:
sacrelogger.warning(
"That's 100 lines that end in a tokenized period ('.')")
sacrelogger.warning(
'It looks like you forgot to detokenize your test data, which may hurt your score.'
)
sacrelogger.warning(
"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'."
)
output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]
ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)
sys_len += len(output.split())
ref_len += closest_len
sys_ngrams = extract_ngrams(output)
for ngram in sys_ngrams.keys():
n = len(ngram.split())
correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))
total[n - 1] += sys_ngrams[ngram]
return compute_bleu(correct, total, sys_len, ref_len, smooth_method=
smooth_method, smooth_value=smooth_value, use_effective_order=
use_effective_order)
def raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:
"""Convenience function that wraps corpus_bleu().
This is convenient if you're using sacrebleu as a library, say for scoring on dev.
It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).
:param sys_stream: the system stream (a sequence of segments)
:param ref_streams: a list of one or more reference streams (each a sequence of segments)
"""
return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',
smooth_value=smooth_value, force=True, tokenize='none',
use_effective_order=True)
def delete_whitespace(text: str) ->str:
"""
Removes whitespaces from text.
"""
return re.sub('\\s+', '', text).strip()
<|reserved_special_token_0|>
def _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:
if avg_precision + avg_recall == 0:
return 0.0
beta_square = beta ** 2
score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *
avg_precision + avg_recall)
return score
def corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:
int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True
) ->CHRF:
"""
Computes Chrf on a corpus.
:param hypotheses: Stream of hypotheses.
:param references: Stream of references
:param order: Maximum n-gram order.
:param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
corpus_statistics = get_corpus_statistics(hypotheses, references, order
=order, remove_whitespace=remove_whitespace)
avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,
order)
return CHRF(_chrf(avg_precision, avg_recall, beta=beta))
<|reserved_special_token_0|>
def get_langpairs_for_testset(testset: str) ->List:
"""Return a list of language pairs for a given test set."""
return list(filter(lambda x: re.match('\\w\\w\\-\\w\\w', x), DATASETS.
get(testset, {}).keys()))
def get_a_list_of_testset_names() ->str:
"""Return a string with a formatted list of available test sets plus their descriptions. """
message = 'The available test sets are:'
for testset in sorted(DATASETS.keys(), reverse=True):
message += '\n%20s: %s' % (testset, DATASETS[testset].get(
'description', ''))
return message
<|reserved_special_token_0|>
def _filter_subset(systems, test_sets, langpair, origlang, subset=None):
"""Filter sentences with a given origlang (or subset) according to the raw SGM files."""
if origlang is None and subset is None:
return systems
if test_sets is None or langpair is None:
raise ValueError(
'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'
)
indices_to_keep = []
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[
test_set][langpair][0])
if not rawfile.endswith('.sgm'):
raise Exception(
                '--origlang and --subset support only *.sgm files, not %s',
rawfile)
if subset is not None:
if test_set not in SUBSETS:
raise Exception(
'No subset annotation available for test set ' + test_set)
doc_to_tags = SUBSETS[test_set]
number_sentences_included = 0
with smart_open(rawfile) as fin:
include_doc = False
for line in fin:
if line.startswith('<doc '):
if origlang is None:
include_doc = True
else:
doc_origlang = re.sub('.* origlang="([^"]+)".*\\n',
'\\1', line)
if origlang.startswith('non-'):
include_doc = doc_origlang != origlang[4:]
else:
include_doc = doc_origlang == origlang
if subset is not None:
doc_id = re.sub('.* docid="([^"]+)".*\\n', '\\1', line)
if not re.search(subset, doc_to_tags.get(doc_id, '')):
include_doc = False
if line.startswith('<seg '):
indices_to_keep.append(include_doc)
number_sentences_included += 1 if include_doc else 0
return [[sentence for sentence, keep in zip(sys, indices_to_keep) if
keep] for sys in systems]
<|reserved_special_token_0|>
def parse_args():
arg_parser = argparse.ArgumentParser(description=
"""sacreBLEU: Hassle-free computation of shareable BLEU scores.
Quick usage: score your detokenized output against WMT'14 EN-DE:
cat output.detok.de | sacrebleu -t wmt14 -l en-de"""
, formatter_class=argparse.RawDescriptionHelpFormatter)
arg_parser.add_argument('--test-set', '-t', type=str, default=None,
help=
'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'
)
arg_parser.add_argument('-lc', action='store_true', default=False, help
='Use case-insensitive BLEU (default: actual case)')
arg_parser.add_argument('--sentence-level', '-sl', action='store_true',
help='Output metric on each sentence.')
arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',
'add-k', 'none'], default='exp', help=
'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'
)
arg_parser.add_argument('--smooth-value', '-sv', type=float, default=
None, help=
'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'
.format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))
arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),
default=None, help='tokenization method to use')
arg_parser.add_argument('--language-pair', '-l', dest='langpair',
default=None, help=
'source-target language pair (2-char ISO639-1 codes)')
arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=
None, help=
'use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation'
)
arg_parser.add_argument('--subset', dest='subset', default=None, help=
        'use a subset of sentences whose document annotation matches a given regex (see SUBSETS in the source code)'
)
arg_parser.add_argument('--download', type=str, default=None, help=
'download a test set and quit')
arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=
str, default=None, help=
'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'
)
arg_parser.add_argument('--input', '-i', type=str, default='-', help=
'Read input from a file instead of STDIN')
arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=
'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'
)
arg_parser.add_argument('refs', nargs='*', default=[], help=
'optional list of references (for backwards-compatibility with older scripts)'
)
arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],
nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')
arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,
help='chrf character order (default: %(default)s)')
arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,
help='chrf BETA parameter (default: %(default)s)')
arg_parser.add_argument('--chrf-whitespace', action='store_true',
default=False, help=
'include whitespace in chrF calculation (default: %(default)s)')
arg_parser.add_argument('--short', default=False, action='store_true',
help='produce a shorter (less human readable) signature')
arg_parser.add_argument('--score-only', '-b', default=False, action=
'store_true', help='output only the BLEU score')
arg_parser.add_argument('--force', default=False, action='store_true',
help='insist that your tokenized input is actually detokenized')
arg_parser.add_argument('--quiet', '-q', default=False, action=
'store_true', help='suppress informative output')
arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
help='open text files with specified encoding (default: %(default)s)')
arg_parser.add_argument('--list', default=False, action='store_true',
help='print a list of all available test sets.')
arg_parser.add_argument('--citation', '--cite', default=False, action=
'store_true', help='dump the bibtex citation and quit.')
arg_parser.add_argument('--width', '-w', type=int, default=1, help=
'floating point width (default: %(default)s)')
arg_parser.add_argument('--detail', '-d', default=False, action=
'store_true', help=
'print extra information (split test sets based on origlang)')
arg_parser.add_argument('-V', '--version', action='version', version=
'%(prog)s {}'.format(VERSION))
args = arg_parser.parse_args()
return args
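# Illustrative sketch only (the real entry point is more involved: it also
# handles --echo, --download, --list, --citation, signatures, and error
# checking). This shows how parse_args() could feed the corpus-level scoring
# functions defined in this module; the helper name is hypothetical.
def _example_main():
    args = parse_args()
    hyps = [line.rstrip() for line in sys.stdin]
    refs = [[line.rstrip() for line in smart_open(path)] for path in args.refs]
    if 'bleu' in args.metrics:
        bleu = corpus_bleu(hyps, refs, smooth_method=args.smooth,
                           smooth_value=args.smooth_value, force=args.force,
                           lowercase=args.lc)
        print(bleu.format(width=args.width))
    if 'chrf' in args.metrics:
        chrf = corpus_chrf(hyps, refs[0], order=args.chrf_order,
                           beta=args.chrf_beta,
                           remove_whitespace=not args.chrf_whitespace)
        print(chrf.format(width=args.width))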
def smart_open(file, mode='rt', encoding='utf-8'):
"""Convenience function for reading compressed or plain text files.
:param file: The file to read.
:param mode: The file mode (read, write).
:param encoding: The file encoding.
"""
if file.endswith('.gz'):
return gzip.open(file, mode=mode, encoding=encoding, newline='\n')
return open(file, mode=mode, encoding=encoding, newline='\n')
def my_log(num):
"""
Floors the log function
:param num: the number
:return: log(num) floored to a very low number
"""
if num == 0.0:
return -9999999999
return math.log(num)
def bleu_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the signature
"""
abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':
'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}
signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.
smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}
if args.tokenize == 'ja-mecab':
signature['tok'] += '-' + TokenizeMeCab().signature()
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,
signature[x]) for x in sorted(signature.keys())])
return sigstr
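# For example (illustrative), a rendered BLEU signature might look like:
#   'case.mixed+lang.en-de+numrefs.1+smooth.exp+test.wmt14+tok.13a+version.1.4.0'
# The exact tokenizer and version fields depend on the run.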
def chrf_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the chrF signature
"""
abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':
'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}
signature = {'version': VERSION, 'space': args.chrf_whitespace,
'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if
args.lc else 'mixed'}
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,
signature[x]) for x in sorted(signature.keys())])
return sigstr
def extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) ->Counter:
"""Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.
:param line: A segment containing a sequence of words.
:param min_order: Minimum n-gram length (default: 1).
:param max_order: Maximum n-gram length (default: NGRAM_ORDER).
:return: a dictionary containing ngrams and counts
"""
ngrams = Counter()
tokens = line.split()
for n in range(min_order, max_order + 1):
for i in range(0, len(tokens) - n + 1):
ngram = ' '.join(tokens[i:i + n])
ngrams[ngram] += 1
return ngrams
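# Example (illustrative):
#   extract_ngrams('a b a', max_order=2)
#   -> Counter({'a': 2, 'b': 1, 'a b': 1, 'b a': 1})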
def extract_char_ngrams(s: str, n: int) ->Counter:
"""
    Returns counts of character n-grams of order n from string s.
"""
return Counter([s[i:i + n] for i in range(len(s) - n + 1)])
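# Example (illustrative):
#   extract_char_ngrams('abab', 2) -> Counter({'ab': 2, 'ba': 1})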
def ref_stats(output, refs):
ngrams = Counter()
closest_diff = None
closest_len = None
for ref in refs:
tokens = ref.split()
reflen = len(tokens)
diff = abs(len(output.split()) - reflen)
if closest_diff is None or diff < closest_diff:
closest_diff = diff
closest_len = reflen
elif diff == closest_diff:
if reflen < closest_len:
closest_len = reflen
ngrams_ref = extract_ngrams(ref)
for ngram in ngrams_ref.keys():
ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])
return ngrams, closest_diff, closest_len
def _clean(s):
"""
Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
:param s: The string.
:return: A cleaned-up string.
"""
return re.sub('\\s+', ' ', s.strip())
def process_to_text(rawfile, txtfile, field: int=None):
"""Processes raw files to plain text files.
:param rawfile: the input file (possibly SGML)
:param txtfile: the plaintext file
:param field: For TSV files, which field to extract.
"""
if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:
sacrelogger.info('Processing %s to %s', rawfile, txtfile)
        if rawfile.endswith(('.sgm', '.sgml', '.xml')):
            with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
                for line in fin:
                    if line.startswith('<seg '):
                        print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\1',
                            line)), file=fout)
elif rawfile.endswith('.txt'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip(), file=fout)
elif rawfile.endswith('.tsv'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip().split('\t')[field], file=fout)
def print_test_set(test_set, langpair, side, origlang=None, subset=None):
"""Prints to STDOUT the specified side of the specified test set
:param test_set: the test set to print
:param langpair: the language pair
:param side: 'src' for source, 'ref' for reference
:param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
:param subset: print only sentences whose document annotation matches a given regex
"""
files = download_test_set(test_set, langpair)
if side == 'src':
files = [files[0]]
elif side == 'ref':
files.pop(0)
streams = [smart_open(file) for file in files]
streams = _filter_subset(streams, test_set, langpair, origlang, subset)
for lines in zip(*streams):
print('\t'.join(map(lambda x: x.rstrip(), lines)))
def download_test_set(test_set, langpair=None):
"""Downloads the specified test to the system location specified by the SACREBLEU environment variable.
:param test_set: the test set to download
:param langpair: the language pair (needed for some datasets)
:return: the set of processed files
"""
outdir = os.path.join(SACREBLEU_DIR, test_set)
os.makedirs(outdir, exist_ok=True)
    expected_checksums = DATASETS[test_set].get('md5', [None] * len(
        DATASETS[test_set]['data']))
for dataset, expected_md5 in zip(DATASETS[test_set]['data'],
expected_checksums):
tarball = os.path.join(outdir, os.path.basename(dataset))
rawdir = os.path.join(outdir, 'raw')
lockfile = '{}.lock'.format(tarball)
with portalocker.Lock(lockfile, 'w', timeout=60):
if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:
sacrelogger.info('Downloading %s to %s', dataset, tarball)
try:
with urllib.request.urlopen(dataset) as f, open(tarball,
'wb') as out:
out.write(f.read())
except ssl.SSLError:
sacrelogger.warning(
'An SSL error was encountered in downloading the files. If you\'re on a Mac, you may need to run the "Install Certificates.command" file located in the "Python 3" folder, often found under /Applications'
)
sys.exit(1)
if expected_md5 is not None:
md5 = hashlib.md5()
with open(tarball, 'rb') as infile:
for line in infile:
md5.update(line)
if md5.hexdigest() != expected_md5:
sacrelogger.error(
'Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'
.format(md5.hexdigest(), expected_md5))
sacrelogger.error(
'Please manually delete "{}" and rerun the command.'
.format(tarball))
sacrelogger.error(
'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.'
)
sys.exit(1)
else:
sacrelogger.info('Checksum passed: {}'.format(md5.
hexdigest()))
sacrelogger.info('Extracting %s', tarball)
if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):
import tarfile
with tarfile.open(tarball) as tar:
tar.extractall(path=rawdir)
            elif tarball.endswith('.zip'):
                import zipfile
                with zipfile.ZipFile(tarball, 'r') as zf:
                    zf.extractall(path=rawdir)
found = []
languages = DATASETS[test_set].keys() if langpair is None else [langpair]
for pair in languages:
if '-' not in pair:
continue
src, tgt = pair.split('-')
rawfile = DATASETS[test_set][pair][0]
field = None
if rawfile.endswith('.tsv'):
field, rawfile = rawfile.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, rawfile)
outpath = os.path.join(outdir, '{}.{}'.format(pair, src))
process_to_text(rawpath, outpath, field=field)
found.append(outpath)
refs = DATASETS[test_set][pair][1:]
for i, ref in enumerate(refs):
field = None
if ref.endswith('.tsv'):
field, ref = ref.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, ref)
if len(refs) >= 2:
outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))
else:
outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))
process_to_text(rawpath, outpath, field=field)
found.append(outpath)
return found
class Result:
def __init__(self, score: float):
self.score = score
def __str__(self):
return self.format()
class BLEU(Result):
def __init__(self, score: float, counts, totals, precisions, bp,
sys_len, ref_len):
super().__init__(score)
self.counts = counts
self.totals = totals
self.precisions = precisions
self.bp = bp
self.sys_len = sys_len
self.ref_len = ref_len
def format(self, width=2):
precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])
return (
'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'
.format(score=self.score, width=width, precisions=precisions,
bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.
sys_len, ref_len=self.ref_len))
class CHRF(Result):
def __init__(self, score: float):
super().__init__(score)
def format(self, width=2):
return '{score:.{width}f}'.format(score=self.score, width=width)
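# The BLEU computation from sufficient statistics, as called by corpus_bleu()
# below. This is a minimal reference implementation consistent with that call
# signature; the exact smoothing behaviour ('exp' doubling, 'floor'/'add-k'
# defaults) is an assumption and may differ in detail from the canonical code.
def compute_bleu(correct, total, sys_len, ref_len, smooth_method='none',
                 smooth_value=None, use_effective_order=False) ->BLEU:
    """Computes BLEU from n-gram match counts, totals, and lengths."""
    if smooth_value is None and smooth_method in SMOOTH_VALUE_DEFAULT:
        smooth_value = SMOOTH_VALUE_DEFAULT[smooth_method]
    precisions = [0.0] * NGRAM_ORDER
    smooth_mteval = 1.0
    effective_order = NGRAM_ORDER
    for n in range(NGRAM_ORDER):
        if smooth_method == 'add-k' and n >= 1:
            # add-k smoothing applies to orders above the unigram
            correct[n] += smooth_value
            total[n] += smooth_value
        if total[n] == 0:
            break
        if use_effective_order:
            effective_order = n + 1
        if correct[n] == 0:
            if smooth_method == 'exp':
                smooth_mteval *= 2
                precisions[n] = 100.0 / (smooth_mteval * total[n])
            elif smooth_method == 'floor':
                precisions[n] = 100.0 * smooth_value / total[n]
        else:
            precisions[n] = 100.0 * correct[n] / total[n]
    brevity_penalty = 1.0
    if sys_len < ref_len:
        brevity_penalty = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
    score = brevity_penalty * math.exp(
        sum(map(my_log, precisions[:effective_order])) / effective_order)
    return BLEU(score, correct, total, precisions, brevity_penalty, sys_len,
                ref_len)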
def sentence_bleu(hypothesis: str, references: List[str], smooth_method:
str='floor', smooth_value: float=None, use_effective_order: bool=True
) ->BLEU:
"""
Computes BLEU on a single sentence pair.
Disclaimer: computing BLEU on the sentence level is not its intended use,
BLEU is a corpus-level metric.
:param hypothesis: Hypothesis string.
    :param references: List of reference strings.
    :param smooth_method: The smoothing method to use.
    :param smooth_value: For 'floor' smoothing, the floor value to use.
    :param use_effective_order: Account for references that are shorter than the largest n-gram.
    :return: a BLEU object.
"""
bleu = corpus_bleu(hypothesis, references, smooth_method=smooth_method,
smooth_value=smooth_value, use_effective_order=use_effective_order)
return bleu
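# Example (illustrative):
#   sentence_bleu('the cat sat on the mat',
#                 ['the cat was sitting on the mat']).score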
def corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[
str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,
force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,
use_effective_order=False) ->BLEU:
"""Produces BLEU scores along with its sufficient statistics from a source against one or more references.
:param sys_stream: The system stream (a sequence of segments)
:param ref_streams: A list of one or more reference streams (each a sequence of segments)
    :param smooth_method: The smoothing method to use
:param smooth_value: For 'floor' smoothing, the floor to use
:param force: Ignore data that looks already tokenized
:param lowercase: Lowercase the data
:param tokenize: The tokenizer to use
:return: a BLEU object containing everything you'd want
"""
if isinstance(sys_stream, str):
sys_stream = [sys_stream]
if isinstance(ref_streams, str):
ref_streams = [[ref_streams]]
sys_len = 0
ref_len = 0
    correct = [0] * NGRAM_ORDER
    total = [0] * NGRAM_ORDER
tokenized_count = 0
fhs = [sys_stream] + ref_streams
for lines in zip_longest(*fhs):
if None in lines:
raise EOFError(
'Source and reference streams have different lengths!')
if lowercase:
lines = [x.lower() for x in lines]
if not (force or tokenize == 'none') and lines[0].rstrip().endswith(
' .'):
tokenized_count += 1
if tokenized_count == 100:
sacrelogger.warning(
"That's 100 lines that end in a tokenized period ('.')")
sacrelogger.warning(
'It looks like you forgot to detokenize your test data, which may hurt your score.'
)
sacrelogger.warning(
"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'."
)
output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]
ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)
sys_len += len(output.split())
ref_len += closest_len
sys_ngrams = extract_ngrams(output)
for ngram in sys_ngrams.keys():
n = len(ngram.split())
correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))
total[n - 1] += sys_ngrams[ngram]
return compute_bleu(correct, total, sys_len, ref_len, smooth_method=
smooth_method, smooth_value=smooth_value, use_effective_order=
use_effective_order)
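# Example (illustrative): one system stream against one reference stream.
#   hyps = ['the cat sat on the mat']
#   refs = [['the cat was sitting on the mat']]
#   corpus_bleu(hyps, refs).score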
def raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:
"""Convenience function that wraps corpus_bleu().
This is convenient if you're using sacrebleu as a library, say for scoring on dev.
    It uses no tokenization and 'floor' smoothing, with the floor defaulting to 0 (no smoothing).
:param sys_stream: the system stream (a sequence of segments)
:param ref_streams: a list of one or more reference streams (each a sequence of segments)
"""
return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',
smooth_value=smooth_value, force=True, tokenize='none',
use_effective_order=True)
def delete_whitespace(text: str) ->str:
"""
Removes whitespaces from text.
"""
return re.sub('\\s+', '', text).strip()
def get_sentence_statistics(hypothesis: str, reference: str, order: int=
CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:
hypothesis = delete_whitespace(hypothesis
) if remove_whitespace else hypothesis
reference = delete_whitespace(reference
) if remove_whitespace else reference
statistics = [0] * (order * 3)
for i in range(order):
n = i + 1
hypothesis_ngrams = extract_char_ngrams(hypothesis, n)
reference_ngrams = extract_char_ngrams(reference, n)
common_ngrams = hypothesis_ngrams & reference_ngrams
statistics[3 * i + 0] = sum(hypothesis_ngrams.values())
statistics[3 * i + 1] = sum(reference_ngrams.values())
statistics[3 * i + 2] = sum(common_ngrams.values())
return statistics
def get_corpus_statistics(hypotheses: Iterable[str], references: Iterable[
str], order: int=CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:
corpus_statistics = [0] * (order * 3)
for hypothesis, reference in zip(hypotheses, references):
statistics = get_sentence_statistics(hypothesis, reference, order=
order, remove_whitespace=remove_whitespace)
for i in range(len(statistics)):
corpus_statistics[i] += statistics[i]
return corpus_statistics
def _avg_precision_and_recall(statistics: List[float], order: int) ->Tuple[
float, float]:
avg_precision = 0.0
avg_recall = 0.0
effective_order = 0
for i in range(order):
hypotheses_ngrams = statistics[3 * i + 0]
references_ngrams = statistics[3 * i + 1]
common_ngrams = statistics[3 * i + 2]
if hypotheses_ngrams > 0 and references_ngrams > 0:
avg_precision += common_ngrams / hypotheses_ngrams
avg_recall += common_ngrams / references_ngrams
effective_order += 1
if effective_order == 0:
return 0.0, 0.0
avg_precision /= effective_order
avg_recall /= effective_order
return avg_precision, avg_recall
def _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:
if avg_precision + avg_recall == 0:
return 0.0
beta_square = beta ** 2
score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *
avg_precision + avg_recall)
return score
def corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:
int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True
) ->CHRF:
"""
Computes Chrf on a corpus.
:param hypotheses: Stream of hypotheses.
:param references: Stream of references
:param order: Maximum n-gram order.
:param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
corpus_statistics = get_corpus_statistics(hypotheses, references, order
=order, remove_whitespace=remove_whitespace)
avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,
order)
return CHRF(_chrf(avg_precision, avg_recall, beta=beta))
def sentence_chrf(hypothesis: str, reference: str, order: int=CHRF_ORDER,
beta: float=CHRF_BETA, remove_whitespace: bool=True) ->CHRF:
"""
Computes ChrF on a single sentence pair.
:param hypothesis: Hypothesis string.
:param reference: Reference string.
:param order: Maximum n-gram order.
:param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
statistics = get_sentence_statistics(hypothesis, reference, order=order,
remove_whitespace=remove_whitespace)
avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)
return CHRF(_chrf(avg_precision, avg_recall, beta=beta))
def get_langpairs_for_testset(testset: str) ->List:
"""Return a list of language pairs for a given test set."""
return list(filter(lambda x: re.match('\\w\\w\\-\\w\\w', x), DATASETS.
get(testset, {}).keys()))
def get_a_list_of_testset_names() ->str:
"""Return a string with a formatted list of available test sets plus their descriptions. """
message = 'The available test sets are:'
for testset in sorted(DATASETS.keys(), reverse=True):
message += '\n%20s: %s' % (testset, DATASETS[testset].get(
'description', ''))
return message
def _available_origlangs(test_sets, langpair):
"""Return a list of origlang values in according to the raw SGM files."""
origlangs = set()
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[
test_set][langpair][0])
if rawfile.endswith('.sgm'):
with smart_open(rawfile) as fin:
for line in fin:
if line.startswith('<doc '):
doc_origlang = re.sub('.* origlang="([^"]+)".*\\n',
'\\1', line)
origlangs.add(doc_origlang)
return sorted(list(origlangs))
def _filter_subset(systems, test_sets, langpair, origlang, subset=None):
"""Filter sentences with a given origlang (or subset) according to the raw SGM files."""
if origlang is None and subset is None:
return systems
if test_sets is None or langpair is None:
raise ValueError(
'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'
)
indices_to_keep = []
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[
test_set][langpair][0])
if not rawfile.endswith('.sgm'):
raise Exception(
'--origlang and --subset supports only *.sgm files, not %s',
rawfile)
if subset is not None:
if test_set not in SUBSETS:
raise Exception(
'No subset annotation available for test set ' + test_set)
doc_to_tags = SUBSETS[test_set]
number_sentences_included = 0
with smart_open(rawfile) as fin:
include_doc = False
for line in fin:
if line.startswith('<doc '):
if origlang is None:
include_doc = True
else:
doc_origlang = re.sub('.* origlang="([^"]+)".*\\n',
'\\1', line)
if origlang.startswith('non-'):
include_doc = doc_origlang != origlang[4:]
else:
include_doc = doc_origlang == origlang
if subset is not None:
doc_id = re.sub('.* docid="([^"]+)".*\\n', '\\1', line)
if not re.search(subset, doc_to_tags.get(doc_id, '')):
include_doc = False
if line.startswith('<seg '):
indices_to_keep.append(include_doc)
number_sentences_included += 1 if include_doc else 0
return [[sentence for sentence, keep in zip(sys, indices_to_keep) if
keep] for sys in systems]
def main():
args = parse_args()
sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8',
buffering=True, newline='\n')
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8',
buffering=True)
if not args.quiet:
logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s'
)
if args.download:
download_test_set(args.download, args.langpair)
sys.exit(0)
if args.list:
if args.test_set:
print(' '.join(get_langpairs_for_testset(args.test_set)))
else:
print(get_a_list_of_testset_names())
sys.exit(0)
if args.sentence_level and len(args.metrics) > 1:
sacrelogger.error(
'Only one metric can be used with Sentence-level reporting.')
sys.exit(1)
if args.citation:
if not args.test_set:
sacrelogger.error('I need a test set (-t).')
sys.exit(1)
for test_set in args.test_set.split(','):
if 'citation' not in DATASETS[test_set]:
sacrelogger.error('No citation found for %s', test_set)
else:
print(DATASETS[test_set]['citation'])
sys.exit(0)
if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1
):
sacrelogger.error(
'The --num-refs argument allows you to provide any number of tab-delimited references in a single file.'
)
sacrelogger.error(
'You can only use it with externaly-provided references, however (i.e., not with `-t`),'
)
sacrelogger.error(
'and you cannot then provide multiple reference files.')
sys.exit(1)
if args.test_set is not None:
for test_set in args.test_set.split(','):
if test_set not in DATASETS:
sacrelogger.error('Unknown test set "%s"\n%s', test_set,
get_a_list_of_testset_names())
sys.exit(1)
if args.test_set is None:
if len(args.refs) == 0:
sacrelogger.error(
'I need either a predefined test set (-t) or a list of references'
)
sacrelogger.error(get_a_list_of_testset_names())
sys.exit(1)
elif len(args.refs) > 0:
sacrelogger.error(
'I need exactly one of (a) a predefined test set (-t) or (b) a list of references'
)
sys.exit(1)
elif args.langpair is None:
sacrelogger.error('I need a language pair (-l).')
sys.exit(1)
else:
for test_set in args.test_set.split(','):
if args.langpair not in DATASETS[test_set]:
sacrelogger.error('No such language pair "%s"', args.langpair)
sacrelogger.error(
'Available language pairs for test set "%s": %s',
test_set, ', '.join(x for x in DATASETS[test_set].keys(
) if '-' in x))
sys.exit(1)
if args.echo:
if args.langpair is None or args.test_set is None:
sacrelogger.warning(
'--echo requires a test set (--t) and a language pair (-l)')
sys.exit(1)
for test_set in args.test_set.split(','):
print_test_set(test_set, args.langpair, args.echo, args.
origlang, args.subset)
sys.exit(0)
if args.test_set is not None and args.tokenize == 'none':
sacrelogger.warning(
"""You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply
your own reference tokenization. Published numbers will not be comparable with other papers.
"""
)
if args.tokenize is None:
if args.langpair is not None and args.langpair.split('-')[1] == 'zh':
args.tokenize = 'zh'
elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':
args.tokenize = 'ja-mecab'
else:
args.tokenize = DEFAULT_TOKENIZER
if args.langpair is not None and 'bleu' in args.metrics:
if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':
logger.warning(
'You should also pass "--tok zh" when scoring Chinese...')
if args.langpair.split('-')[1
] == 'ja' and not args.tokenize.startswith('ja-'):
logger.warning(
'You should also pass "--tok ja-mecab" when scoring Japanese...'
)
if args.test_set is None:
concat_ref_files = [args.refs]
else:
concat_ref_files = []
for test_set in args.test_set.split(','):
_, *ref_files = download_test_set(test_set, args.langpair)
if len(ref_files) == 0:
sacrelogger.warning('No references found for test set {}/{}.'
.format(test_set, args.langpair))
concat_ref_files.append(ref_files)
inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding
) if args.input == '-' else smart_open(args.input, encoding=args.
encoding)
full_system = inputfh.readlines()
full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.
num_refs))]
for ref_files in concat_ref_files:
for refno, ref_file in enumerate(ref_files):
for lineno, line in enumerate(smart_open(ref_file, encoding=
args.encoding), 1):
if args.num_refs != 1:
splits = line.rstrip().split(sep='\t', maxsplit=args.
num_refs - 1)
if len(splits) != args.num_refs:
sacrelogger.error(
'FATAL: line {}: expected {} fields, but found {}.'
.format(lineno, args.num_refs, len(splits)))
sys.exit(17)
for refno, split in enumerate(splits):
full_refs[refno].append(split)
else:
full_refs[refno].append(line)
system, *refs = _filter_subset([full_system, *full_refs], args.test_set,
args.langpair, args.origlang, args.subset)
if len(system) == 0:
message = 'Test set %s contains no sentence' % args.test_set
if args.origlang is not None or args.subset is not None:
message += ' with'
message += ('' if args.origlang is None else ' origlang=' +
args.origlang)
message += '' if args.subset is None else ' subset=' + args.subset
sacrelogger.error(message)
exit(1)
if args.sentence_level:
for output, *references in zip(system, *refs):
results = []
for metric in args.metrics:
if metric == 'bleu':
bleu = sentence_bleu(output, [[x] for x in references],
smooth_method=args.smooth, smooth_value=args.
smooth_value)
results.append(bleu)
if metric == 'chrf':
chrf = sentence_chrf(output, references[0], args.
chrf_order, args.chrf_beta, remove_whitespace=not
args.chrf_whitespace)
results.append(chrf)
display_metric(args.metrics, results, len(refs), args)
sys.exit(0)
results = []
try:
for metric in args.metrics:
if metric == 'bleu':
bleu = corpus_bleu(system, refs, smooth_method=args.smooth,
smooth_value=args.smooth_value, force=args.force,
lowercase=args.lc, tokenize=args.tokenize)
results.append(bleu)
elif metric == 'chrf':
chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta,
order=args.chrf_order, remove_whitespace=not args.
chrf_whitespace)
results.append(chrf)
except EOFError:
sacrelogger.error(
'The input and reference stream(s) were of different lengths.')
if args.test_set is not None:
sacrelogger.error(
"""
This could be a problem with your system output or with sacreBLEU's reference database.
If the latter, you can clean out the references cache by typing:
rm -r %s/%s
They will be downloaded automatically again the next time you run sacreBLEU."""
, SACREBLEU_DIR, args.test_set)
sys.exit(1)
display_metric(args.metrics, results, len(refs), args)
if args.detail:
width = args.width
sents_digits = len(str(len(full_system)))
origlangs = args.origlang if args.origlang else _available_origlangs(
args.test_set, args.langpair)
for origlang in origlangs:
subsets = [None]
if args.subset is not None:
subsets += [args.subset]
elif all(t in SUBSETS for t in args.test_set.split(',')):
subsets += COUNTRIES + DOMAINS
for subset in subsets:
system, *refs = _filter_subset([full_system, *full_refs],
args.test_set, args.langpair, origlang, subset)
if len(system) == 0:
continue
if subset in COUNTRIES:
subset_str = '%20s' % ('country=' + subset)
elif subset in DOMAINS:
subset_str = '%20s' % ('domain=' + subset)
else:
subset_str = '%20s' % ''
if 'bleu' in args.metrics:
bleu = corpus_bleu(system, refs, smooth_method=args.
smooth, smooth_value=args.smooth_value, force=args.
force, lowercase=args.lc, tokenize=args.tokenize)
print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.
format(origlang, subset_str, len(system),
sents_digits, bleu.score, width + 4, width))
if 'chrf' in args.metrics:
chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta,
order=args.chrf_order, remove_whitespace=not args.
chrf_whitespace)
print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.
format(origlang, subset_str, len(system),
sents_digits, chrf.score, width + 4, width))
def display_metric(metrics_to_print, results, num_refs, args):
"""
Badly in need of refactoring.
One idea is to put all of this in the BLEU and CHRF classes, and then define
a Result::signature() function.
"""
for metric, result in zip(metrics_to_print, results):
if metric == 'bleu':
if args.score_only:
print('{0:.{1}f}'.format(result.score, args.width))
else:
version_str = bleu_signature(args, num_refs)
print(result.format(args.width).replace('BLEU', 'BLEU+' +
version_str))
elif metric == 'chrf':
if args.score_only:
print('{0:.{1}f}'.format(result.score, args.width))
else:
version_str = chrf_signature(args, num_refs)
print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta,
version_str, result.score, args.width))
def parse_args():
arg_parser = argparse.ArgumentParser(description=
"""sacreBLEU: Hassle-free computation of shareable BLEU scores.
Quick usage: score your detokenized output against WMT'14 EN-DE:
cat output.detok.de | sacrebleu -t wmt14 -l en-de"""
, formatter_class=argparse.RawDescriptionHelpFormatter)
arg_parser.add_argument('--test-set', '-t', type=str, default=None,
help=
'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'
)
arg_parser.add_argument('-lc', action='store_true', default=False, help
='Use case-insensitive BLEU (default: actual case)')
arg_parser.add_argument('--sentence-level', '-sl', action='store_true',
help='Output metric on each sentence.')
arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',
'add-k', 'none'], default='exp', help=
'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'
)
arg_parser.add_argument('--smooth-value', '-sv', type=float, default=
None, help=
'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'
.format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))
arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),
default=None, help='tokenization method to use')
arg_parser.add_argument('--language-pair', '-l', dest='langpair',
default=None, help=
'source-target language pair (2-char ISO639-1 codes)')
arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=
None, help=
'use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation'
)
arg_parser.add_argument('--subset', dest='subset', default=None, help=
'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'
)
arg_parser.add_argument('--download', type=str, default=None, help=
'download a test set and quit')
arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=
str, default=None, help=
'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'
)
arg_parser.add_argument('--input', '-i', type=str, default='-', help=
'Read input from a file instead of STDIN')
arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=
'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'
)
arg_parser.add_argument('refs', nargs='*', default=[], help=
'optional list of references (for backwards-compatibility with older scripts)'
)
arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],
nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')
arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,
help='chrf character order (default: %(default)s)')
arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,
help='chrf BETA parameter (default: %(default)s)')
arg_parser.add_argument('--chrf-whitespace', action='store_true',
default=False, help=
'include whitespace in chrF calculation (default: %(default)s)')
arg_parser.add_argument('--short', default=False, action='store_true',
help='produce a shorter (less human readable) signature')
arg_parser.add_argument('--score-only', '-b', default=False, action=
'store_true', help='output only the BLEU score')
arg_parser.add_argument('--force', default=False, action='store_true',
help='insist that your tokenized input is actually detokenized')
arg_parser.add_argument('--quiet', '-q', default=False, action=
'store_true', help='suppress informative output')
arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
help='open text files with specified encoding (default: %(default)s)')
arg_parser.add_argument('--list', default=False, action='store_true',
help='print a list of all available test sets.')
arg_parser.add_argument('--citation', '--cite', default=False, action=
'store_true', help='dump the bibtex citation and quit.')
arg_parser.add_argument('--width', '-w', type=int, default=1, help=
'floating point width (default: %(default)s)')
arg_parser.add_argument('--detail', '-d', default=False, action=
'store_true', help=
'print extra information (split test sets based on origlang)')
arg_parser.add_argument('-V', '--version', action='version', version=
'%(prog)s {}'.format(VERSION))
args = arg_parser.parse_args()
return args
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
See the [README.md] file for more information.
"""
import argparse
import gzip
import hashlib
import io
import logging
import math
import os
import portalocker
import re
import sys
import ssl
import urllib.request
from collections import Counter
from itertools import zip_longest
from typing import List, Iterable, Tuple, Union
from .tokenizer import TOKENIZERS, TokenizeMeCab
from .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS
from . import __version__ as VERSION
sacrelogger = logging.getLogger('sacrebleu')
try:
# SIGPIPE is not available on Windows machines, throwing an exception.
from signal import SIGPIPE
# If SIGPIPE is available, change behaviour to default instead of ignore.
from signal import signal, SIG_DFL
signal(SIGPIPE, SIG_DFL)
except ImportError:
sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')
# Where to store downloaded test sets.
# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.
#
# Querying for a HOME environment variable can result in None (e.g., on Windows)
# in which case the os.path.join() throws a TypeError. Using expanduser() is
# a safe way to get the user's home folder.
USERHOME = os.path.expanduser("~")
SACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))
# n-gram order. Don't change this.
NGRAM_ORDER = 4
# Default values for CHRF
CHRF_ORDER = 6
# default to 2 (per http://www.aclweb.org/anthology/W16-2341)
CHRF_BETA = 2
# The default floor value to use with `--smooth floor`
SMOOTH_VALUE_DEFAULT = {'floor': 0.0, 'add-k': 1}
DEFAULT_TOKENIZER = '13a'
def smart_open(file, mode='rt', encoding='utf-8'):
"""Convenience function for reading compressed or plain text files.
:param file: The file to read.
:param mode: The file mode (read, write).
:param encoding: The file encoding.
"""
if file.endswith('.gz'):
return gzip.open(file, mode=mode, encoding=encoding, newline="\n")
return open(file, mode=mode, encoding=encoding, newline="\n")
def my_log(num):
"""
Floors the log function
:param num: the number
:return: log(num) floored to a very low number
"""
if num == 0.0:
return -9999999999
return math.log(num)
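# Illustrative note (added for exposition, not part of the original module):
# the floor keeps the geometric mean computable. A single unsmoothed zero
# precision contributes -9999999999 to the summed logs, so math.exp() of the
# average silently underflows to 0.0 instead of raising on log(0):
#   >>> math.exp(sum(map(my_log, [50.0, 25.0, 0.0, 0.0])) / 4)
#   0.0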
def bleu_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the signature
"""
# Abbreviations for the signature
abbr = {
'test': 't',
'lang': 'l',
'smooth': 's',
'case': 'c',
'tok': 'tok',
'numrefs': '#',
'version': 'v',
'origlang': 'o',
'subset': 'S',
}
signature = {'tok': args.tokenize,
'version': VERSION,
'smooth': args.smooth,
'numrefs': numrefs,
'case': 'lc' if args.lc else 'mixed'}
# For the Japanese tokenizer, add a dictionary type and its version to the signature.
if args.tokenize == "ja-mecab":
signature['tok'] += "-" + TokenizeMeCab().signature()
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])
return sigstr
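# Example (illustrative): with the defaults (one reference, no test set) the
# long-form signature sorts its keys alphabetically, e.g.
#   case.mixed+numrefs.1+smooth.exp+tok.13a+version.<VERSION>
# while --short abbreviates each key: c.mixed+#.1+s.exp+tok.13a+v.<VERSION>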
def chrf_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the chrF signature
"""
# Abbreviations for the signature
abbr = {
'test': 't',
'lang': 'l',
'numchars': 'n',
'space': 's',
'case': 'c',
'numrefs': '#',
'version': 'v',
'origlang': 'o',
'subset': 'S',
}
signature = {'version': VERSION,
'space': args.chrf_whitespace,
'numchars': args.chrf_order,
'numrefs': numrefs,
'case': 'lc' if args.lc else 'mixed'}
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])
return sigstr
def extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) -> Counter:
"""Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.
:param line: A segment containing a sequence of words.
:param min_order: Minimum n-gram length (default: 1).
:param max_order: Maximum n-gram length (default: NGRAM_ORDER).
:return: a dictionary containing ngrams and counts
"""
ngrams = Counter()
tokens = line.split()
for n in range(min_order, max_order + 1):
for i in range(0, len(tokens) - n + 1):
ngram = ' '.join(tokens[i: i + n])
ngrams[ngram] += 1
return ngrams
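# Example (illustrative): overlapping n-grams are counted per occurrence:
#   >>> dict(extract_ngrams('the cat the', max_order=2))
#   {'the': 2, 'cat': 1, 'the cat': 1, 'cat the': 1}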
def extract_char_ngrams(s: str, n: int) -> Counter:
"""
Yields counts of character n-grams from string s of order n.
"""
return Counter([s[i:i + n] for i in range(len(s) - n + 1)])
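# Example (illustrative): character n-grams overlap in the same way:
#   >>> dict(extract_char_ngrams('abab', 2))
#   {'ab': 2, 'ba': 1}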
def ref_stats(output, refs):
ngrams = Counter()
closest_diff = None
closest_len = None
for ref in refs:
tokens = ref.split()
reflen = len(tokens)
diff = abs(len(output.split()) - reflen)
if closest_diff is None or diff < closest_diff:
closest_diff = diff
closest_len = reflen
elif diff == closest_diff:
if reflen < closest_len:
closest_len = reflen
ngrams_ref = extract_ngrams(ref)
for ngram in ngrams_ref.keys():
ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])
return ngrams, closest_diff, closest_len
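# Illustrative note: when two references tie on length difference, the shorter
# one wins. For a 3-token output and references of 2 and 4 tokens (both at
# distance 1), closest_len ends up 2 regardless of reference order.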
def _clean(s):
"""
Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
:param s: The string.
:return: A cleaned-up string.
"""
return re.sub(r'\s+', ' ', s.strip())
def process_to_text(rawfile, txtfile, field: int=None):
"""Processes raw files to plain text files.
:param rawfile: the input file (possibly SGML)
:param txtfile: the plaintext file
:param field: For TSV files, which field to extract.
"""
if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:
sacrelogger.info("Processing %s to %s", rawfile, txtfile)
if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line)), file=fout)
elif rawfile.endswith('.xml'): # IWSLT
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line)), file=fout)
elif rawfile.endswith('.txt'): # wmt17/ms
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip(), file=fout)
elif rawfile.endswith('.tsv'): # MTNT
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip().split('\t')[field], file=fout)
def print_test_set(test_set, langpair, side, origlang=None, subset=None):
"""Prints to STDOUT the specified side of the specified test set
:param test_set: the test set to print
:param langpair: the language pair
:param side: 'src' for source, 'ref' for reference
:param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
:param subset: print only sentences whose document annotation matches a given regex
"""
files = download_test_set(test_set, langpair)
if side == 'src':
files = [files[0]]
elif side == 'ref':
files.pop(0)
streams = [smart_open(file) for file in files]
streams = _filter_subset(streams, test_set, langpair, origlang, subset)
for lines in zip(*streams):
print('\t'.join(map(lambda x: x.rstrip(), lines)))
def download_test_set(test_set, langpair=None):
"""Downloads the specified test to the system location specified by the SACREBLEU environment variable.
:param test_set: the test set to download
:param langpair: the language pair (needed for some datasets)
:return: the set of processed files
"""
outdir = os.path.join(SACREBLEU_DIR, test_set)
os.makedirs(outdir, exist_ok=True)
expected_checksums = DATASETS[test_set].get('md5', [None] * len(DATASETS[test_set]))
for dataset, expected_md5 in zip(DATASETS[test_set]['data'], expected_checksums):
tarball = os.path.join(outdir, os.path.basename(dataset))
rawdir = os.path.join(outdir, 'raw')
lockfile = '{}.lock'.format(tarball)
with portalocker.Lock(lockfile, 'w', timeout=60):
if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:
sacrelogger.info("Downloading %s to %s", dataset, tarball)
try:
with urllib.request.urlopen(dataset) as f, open(tarball, 'wb') as out:
out.write(f.read())
except ssl.SSLError:
sacrelogger.warning('An SSL error was encountered in downloading the files. If you\'re on a Mac, '
'you may need to run the "Install Certificates.command" file located in the '
'"Python 3" folder, often found under /Applications')
sys.exit(1)
# Check md5sum
if expected_md5 is not None:
md5 = hashlib.md5()
with open(tarball, 'rb') as infile:
for line in infile:
md5.update(line)
if md5.hexdigest() != expected_md5:
sacrelogger.error('Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'.format(md5.hexdigest(), expected_md5))
sacrelogger.error('Please manually delete "{}" and rerun the command.'.format(tarball))
sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')
sys.exit(1)
else:
sacrelogger.info('Checksum passed: {}'.format(md5.hexdigest()))
# Extract the tarball
sacrelogger.info('Extracting %s', tarball)
if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):
import tarfile
with tarfile.open(tarball) as tar:
tar.extractall(path=rawdir)
elif tarball.endswith('.zip'):
import zipfile
                with zipfile.ZipFile(tarball, 'r') as zf:
                    zf.extractall(path=rawdir)
found = []
# Process the files into plain text
languages = DATASETS[test_set].keys() if langpair is None else [langpair]
for pair in languages:
if '-' not in pair:
continue
src, tgt = pair.split('-')
rawfile = DATASETS[test_set][pair][0]
field = None # used for TSV files
if rawfile.endswith('.tsv'):
field, rawfile = rawfile.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, rawfile)
outpath = os.path.join(outdir, '{}.{}'.format(pair, src))
process_to_text(rawpath, outpath, field=field)
found.append(outpath)
refs = DATASETS[test_set][pair][1:]
for i, ref in enumerate(refs):
field = None
if ref.endswith('.tsv'):
field, ref = ref.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, ref)
if len(refs) >= 2:
outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))
else:
outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))
process_to_text(rawpath, outpath, field=field)
found.append(outpath)
return found
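# Example usage (illustrative; paths depend on $SACREBLEU and the dataset,
# and the first call downloads the raw files):
#   >>> files = download_test_set('wmt14', 'en-de')
#   >>> # files[0] is the source side, e.g. ~/.sacrebleu/wmt14/en-de.en,
#   >>> # and files[1:] are the processed reference file(s).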
class Result:
def __init__(self, score: float):
self.score = score
def __str__(self):
return self.format()
class BLEU(Result):
def __init__(self,
score: float,
counts,
totals,
precisions,
bp,
sys_len,
ref_len):
super().__init__(score)
self.counts = counts
self.totals = totals
self.precisions = precisions
self.bp = bp
self.sys_len = sys_len
self.ref_len = ref_len
def format(self, width=2):
precisions = "/".join(["{:.1f}".format(p) for p in self.precisions])
return 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'.format(
score=self.score,
width=width,
precisions=precisions,
bp=self.bp,
ratio=self.sys_len / self.ref_len,
sys_len=self.sys_len,
ref_len=self.ref_len)
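# Example of the formatted output line (values are illustrative only):
#   BLEU = 26.20 58.3/32.5/20.4/13.3 (BP = 1.000 ratio = 1.017 hyp_len = 62362 ref_len = 61287)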
class CHRF(Result):
def __init__(self, score: float):
super().__init__(score)
def format(self, width=2):
return '{score:.{width}f}'.format(score=self.score, width=width)
def compute_bleu(correct: List[int],
total: List[int],
sys_len: int,
ref_len: int,
smooth_method = 'none',
smooth_value = None,
use_effective_order = False) -> BLEU:
"""Computes BLEU score from its sufficient statistics. Adds smoothing.
Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU",
Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)
- exp: NIST smoothing method (Method 3)
- floor: Method 1
- add-k: Method 2 (generalizing Lin and Och, 2004)
- none: do nothing.
:param correct: List of counts of correct ngrams, 1 <= n <= NGRAM_ORDER
:param total: List of counts of total ngrams, 1 <= n <= NGRAM_ORDER
:param sys_len: The cumulative system length
:param ref_len: The cumulative reference length
    :param smooth_method: The smoothing method to use
    :param smooth_value: The smoothing value, used by the 'floor' and 'add-k' methods
:param use_effective_order: If true, use the length of `correct` for the n-gram order instead of NGRAM_ORDER.
:return: A BLEU object with the score (100-based) and other statistics.
"""
if smooth_method in SMOOTH_VALUE_DEFAULT and smooth_value is None:
smooth_value = SMOOTH_VALUE_DEFAULT[smooth_method]
precisions = [0 for x in range(NGRAM_ORDER)]
smooth_mteval = 1.
effective_order = NGRAM_ORDER
for n in range(1, NGRAM_ORDER + 1):
if smooth_method == 'add-k' and n > 1:
correct[n-1] += smooth_value
total[n-1] += smooth_value
if total[n-1] == 0:
break
if use_effective_order:
effective_order = n
if correct[n-1] == 0:
if smooth_method == 'exp':
smooth_mteval *= 2
precisions[n-1] = 100. / (smooth_mteval * total[n-1])
elif smooth_method == 'floor':
precisions[n-1] = 100. * smooth_value / total[n-1]
else:
precisions[n-1] = 100. * correct[n-1] / total[n-1]
# If the system guesses no i-grams, 1 <= i <= NGRAM_ORDER, the BLEU score is 0 (technically undefined).
# This is a problem for sentence-level BLEU or a corpus of short sentences, where systems will get no credit
# if sentence lengths fall under the NGRAM_ORDER threshold. This fix scales NGRAM_ORDER to the observed
# maximum order. It is only available through the API and off by default
brevity_penalty = 1.0
if sys_len < ref_len:
brevity_penalty = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
score = brevity_penalty * math.exp(sum(map(my_log, precisions[:effective_order])) / effective_order)
return BLEU(score, correct, total, precisions, brevity_penalty, sys_len, ref_len)
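# Worked example (illustrative): for correct=[3, 2, 1, 0], total=[3, 2, 1, 0]
# and sys_len == ref_len == 3 (a 3-token hypothesis identical to its
# reference), use_effective_order=True stops at the empty 4-gram bucket,
# effective_order becomes 3, every live precision is 100.0, BP is 1.0 and the
# score is 100.0. With use_effective_order=False, the 4-gram precision stays
# 0 and my_log() drives the geometric mean, and hence BLEU, to 0.0.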
def sentence_bleu(hypothesis: str,
references: List[str],
smooth_method: str = 'floor',
smooth_value: float = None,
use_effective_order: bool = True) -> BLEU:
"""
Computes BLEU on a single sentence pair.
Disclaimer: computing BLEU on the sentence level is not its intended use,
BLEU is a corpus-level metric.
:param hypothesis: Hypothesis string.
    :param references: The reference(s) to score against.
    :param smooth_method: The smoothing method to use.
    :param smooth_value: For 'floor' smoothing, the floor value to use.
    :param use_effective_order: Account for references that are shorter than the largest n-gram.
    :return: Returns a BLEU object containing the sentence-level score.
"""
bleu = corpus_bleu(hypothesis, references,
smooth_method=smooth_method,
smooth_value=smooth_value,
use_effective_order=use_effective_order)
return bleu
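# Example usage (illustrative). Note that, as called from main() below, the
# references are wrapped as a list of single-reference lists:
#   >>> round(sentence_bleu('the cat sat', [['the cat sat']]).score, 2)
#   100.0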
def corpus_bleu(sys_stream: Union[str, Iterable[str]],
ref_streams: Union[str, List[Iterable[str]]],
smooth_method='exp',
smooth_value=None,
force=False,
lowercase=False,
tokenize=DEFAULT_TOKENIZER,
use_effective_order=False) -> BLEU:
"""Produces BLEU scores along with its sufficient statistics from a source against one or more references.
:param sys_stream: The system stream (a sequence of segments)
:param ref_streams: A list of one or more reference streams (each a sequence of segments)
    :param smooth_method: The smoothing method to use
:param smooth_value: For 'floor' smoothing, the floor to use
:param force: Ignore data that looks already tokenized
:param lowercase: Lowercase the data
:param tokenize: The tokenizer to use
:return: a BLEU object containing everything you'd want
"""
# Add some robustness to the input arguments
if isinstance(sys_stream, str):
sys_stream = [sys_stream]
if isinstance(ref_streams, str):
ref_streams = [[ref_streams]]
sys_len = 0
ref_len = 0
correct = [0 for n in range(NGRAM_ORDER)]
total = [0 for n in range(NGRAM_ORDER)]
# look for already-tokenized sentences
tokenized_count = 0
fhs = [sys_stream] + ref_streams
for lines in zip_longest(*fhs):
if None in lines:
raise EOFError("Source and reference streams have different lengths!")
if lowercase:
lines = [x.lower() for x in lines]
if not (force or tokenize == 'none') and lines[0].rstrip().endswith(' .'):
tokenized_count += 1
if tokenized_count == 100:
sacrelogger.warning('That\'s 100 lines that end in a tokenized period (\'.\')')
sacrelogger.warning('It looks like you forgot to detokenize your test data, which may hurt your score.')
sacrelogger.warning('If you insist your data is detokenized, or don\'t care, you can suppress this message with \'--force\'.')
output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]
ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)
sys_len += len(output.split())
ref_len += closest_len
sys_ngrams = extract_ngrams(output)
for ngram in sys_ngrams.keys():
n = len(ngram.split())
correct[n-1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))
total[n-1] += sys_ngrams[ngram]
return compute_bleu(correct, total, sys_len, ref_len, smooth_method=smooth_method, smooth_value=smooth_value, use_effective_order=use_effective_order)
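# Example usage (illustrative): one system stream scored against a single
# reference stream of the same length:
#   >>> hyps = ['the cat sat on the mat', 'hello world']
#   >>> refs = [['the cat sat on the mat', 'hello there world']]
#   >>> 0.0 <= corpus_bleu(hyps, refs).score <= 100.0
#   True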
def raw_corpus_bleu(sys_stream,
ref_streams,
smooth_value=None) -> BLEU:
"""Convenience function that wraps corpus_bleu().
This is convenient if you're using sacrebleu as a library, say for scoring on dev.
It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).
:param sys_stream: the system stream (a sequence of segments)
:param ref_streams: a list of one or more reference streams (each a sequence of segments)
"""
return corpus_bleu(sys_stream, ref_streams, smooth_method='floor', smooth_value=smooth_value, force=True, tokenize='none', use_effective_order=True)
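# Example usage (illustrative): identical pre-tokenized streams score 100:
#   >>> round(raw_corpus_bleu(['the cat sat'], [['the cat sat']]).score, 2)
#   100.0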
def delete_whitespace(text: str) -> str:
"""
Removes whitespaces from text.
"""
return re.sub(r'\s+', '', text).strip()
def get_sentence_statistics(hypothesis: str,
reference: str,
order: int = CHRF_ORDER,
remove_whitespace: bool = True) -> List[float]:
hypothesis = delete_whitespace(hypothesis) if remove_whitespace else hypothesis
reference = delete_whitespace(reference) if remove_whitespace else reference
statistics = [0] * (order * 3)
for i in range(order):
n = i + 1
hypothesis_ngrams = extract_char_ngrams(hypothesis, n)
reference_ngrams = extract_char_ngrams(reference, n)
common_ngrams = hypothesis_ngrams & reference_ngrams
statistics[3 * i + 0] = sum(hypothesis_ngrams.values())
statistics[3 * i + 1] = sum(reference_ngrams.values())
statistics[3 * i + 2] = sum(common_ngrams.values())
return statistics
def get_corpus_statistics(hypotheses: Iterable[str],
references: Iterable[str],
order: int = CHRF_ORDER,
remove_whitespace: bool = True) -> List[float]:
corpus_statistics = [0] * (order * 3)
for hypothesis, reference in zip(hypotheses, references):
statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)
for i in range(len(statistics)):
corpus_statistics[i] += statistics[i]
return corpus_statistics
def _avg_precision_and_recall(statistics: List[float], order: int) -> Tuple[float, float]:
avg_precision = 0.0
avg_recall = 0.0
effective_order = 0
for i in range(order):
hypotheses_ngrams = statistics[3 * i + 0]
references_ngrams = statistics[3 * i + 1]
common_ngrams = statistics[3 * i + 2]
if hypotheses_ngrams > 0 and references_ngrams > 0:
avg_precision += common_ngrams / hypotheses_ngrams
avg_recall += common_ngrams / references_ngrams
effective_order += 1
if effective_order == 0:
return 0.0, 0.0
avg_precision /= effective_order
avg_recall /= effective_order
return avg_precision, avg_recall
def _chrf(avg_precision, avg_recall, beta: int = CHRF_BETA) -> float:
if avg_precision + avg_recall == 0:
return 0.0
beta_square = beta ** 2
score = (1 + beta_square) * (avg_precision * avg_recall) / ((beta_square * avg_precision) + avg_recall)
return score
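# Illustrative check: beta squared weights recall, so with the default beta=2
# recall counts four times as much as precision; when the two are equal the
# formula reduces to that shared value:
#   _chrf(0.5, 0.5) == (1 + 4) * 0.25 / (4 * 0.5 + 0.5) == 0.5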
def corpus_chrf(hypotheses: Iterable[str],
references: Iterable[str],
order: int = CHRF_ORDER,
beta: float = CHRF_BETA,
remove_whitespace: bool = True) -> CHRF:
"""
Computes Chrf on a corpus.
:param hypotheses: Stream of hypotheses.
:param references: Stream of references
:param order: Maximum n-gram order.
:param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
corpus_statistics = get_corpus_statistics(hypotheses, references, order=order, remove_whitespace=remove_whitespace)
avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics, order)
return CHRF(_chrf(avg_precision, avg_recall, beta=beta))
def sentence_chrf(hypothesis: str,
reference: str,
order: int = CHRF_ORDER,
beta: float = CHRF_BETA,
remove_whitespace: bool = True) -> CHRF:
"""
Computes ChrF on a single sentence pair.
:param hypothesis: Hypothesis string.
:param reference: Reference string.
:param order: Maximum n-gram order.
:param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)
avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)
return CHRF(_chrf(avg_precision, avg_recall, beta=beta))
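# Example usage (illustrative). Note this implementation returns chrF on a
# 0-1 scale, unlike BLEU's 0-100:
#   >>> round(sentence_chrf('the cat sat', 'the cat sat').score, 2)
#   1.0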
def get_langpairs_for_testset(testset: str) -> List:
"""Return a list of language pairs for a given test set."""
    return list(filter(lambda x: re.match(r'\w\w-\w\w', x), DATASETS.get(testset, {}).keys()))
def get_a_list_of_testset_names() -> str:
"""Return a string with a formatted list of available test sets plus their descriptions. """
message = 'The available test sets are:'
for testset in sorted(DATASETS.keys(), reverse=True):
message += '\n%20s: %s' % (testset, DATASETS[testset].get('description', ''))
return message
def _available_origlangs(test_sets, langpair):
"""Return a list of origlang values in according to the raw SGM files."""
origlangs = set()
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
if rawfile.endswith('.sgm'):
with smart_open(rawfile) as fin:
for line in fin:
if line.startswith('<doc '):
doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
origlangs.add(doc_origlang)
return sorted(list(origlangs))
def _filter_subset(systems, test_sets, langpair, origlang, subset=None):
"""Filter sentences with a given origlang (or subset) according to the raw SGM files."""
if origlang is None and subset is None:
return systems
if test_sets is None or langpair is None:
raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')
indices_to_keep = []
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
if not rawfile.endswith('.sgm'):
raise Exception('--origlang and --subset supports only *.sgm files, not %s', rawfile)
if subset is not None:
if test_set not in SUBSETS:
raise Exception('No subset annotation available for test set ' + test_set)
doc_to_tags = SUBSETS[test_set]
number_sentences_included = 0
with smart_open(rawfile) as fin:
include_doc = False
for line in fin:
if line.startswith('<doc '):
if origlang is None:
include_doc = True
else:
doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
if origlang.startswith('non-'):
include_doc = doc_origlang != origlang[4:]
else:
include_doc = doc_origlang == origlang
if subset is not None:
doc_id = re.sub(r'.* docid="([^"]+)".*\n', '\\1', line)
if not re.search(subset, doc_to_tags.get(doc_id, '')):
include_doc = False
if line.startswith('<seg '):
indices_to_keep.append(include_doc)
number_sentences_included += 1 if include_doc else 0
return [[sentence for sentence,keep in zip(sys, indices_to_keep) if keep] for sys in systems]
def main():
args = parse_args()
# Explicitly set the encoding
sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline="\n")
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True)
if not args.quiet:
logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s')
if args.download:
download_test_set(args.download, args.langpair)
sys.exit(0)
if args.list:
if args.test_set:
print(' '.join(get_langpairs_for_testset(args.test_set)))
else:
print(get_a_list_of_testset_names())
sys.exit(0)
if args.sentence_level and len(args.metrics) > 1:
sacrelogger.error('Only one metric can be used with Sentence-level reporting.')
sys.exit(1)
if args.citation:
if not args.test_set:
sacrelogger.error('I need a test set (-t).')
sys.exit(1)
for test_set in args.test_set.split(','):
if 'citation' not in DATASETS[test_set]:
sacrelogger.error('No citation found for %s', test_set)
else:
print(DATASETS[test_set]['citation'])
sys.exit(0)
if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1):
sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.')
sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),')
sacrelogger.error('and you cannot then provide multiple reference files.')
sys.exit(1)
if args.test_set is not None:
for test_set in args.test_set.split(','):
if test_set not in DATASETS:
sacrelogger.error('Unknown test set "%s"\n%s', test_set, get_a_list_of_testset_names())
sys.exit(1)
if args.test_set is None:
if len(args.refs) == 0:
sacrelogger.error('I need either a predefined test set (-t) or a list of references')
sacrelogger.error(get_a_list_of_testset_names())
sys.exit(1)
elif len(args.refs) > 0:
sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')
sys.exit(1)
elif args.langpair is None:
sacrelogger.error('I need a language pair (-l).')
sys.exit(1)
else:
for test_set in args.test_set.split(','):
if args.langpair not in DATASETS[test_set]:
sacrelogger.error('No such language pair "%s"', args.langpair)
sacrelogger.error('Available language pairs for test set "%s": %s', test_set,
', '.join(x for x in DATASETS[test_set].keys() if '-' in x))
sys.exit(1)
if args.echo:
if args.langpair is None or args.test_set is None:
sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)")
sys.exit(1)
for test_set in args.test_set.split(','):
print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset)
sys.exit(0)
if args.test_set is not None and args.tokenize == 'none':
sacrelogger.warning("You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\n"
"your own reference tokenization. Published numbers will not be comparable with other papers.\n")
    # Internal tokenizer settings: default to 'zh' for Chinese targets,
    # 'ja-mecab' for Japanese, and DEFAULT_TOKENIZER otherwise.
if args.tokenize is None:
# set default
if args.langpair is not None and args.langpair.split('-')[1] == 'zh':
args.tokenize = 'zh'
elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':
args.tokenize = 'ja-mecab'
else:
args.tokenize = DEFAULT_TOKENIZER
if args.langpair is not None and 'bleu' in args.metrics:
if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':
logger.warning('You should also pass "--tok zh" when scoring Chinese...')
if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'):
logger.warning('You should also pass "--tok ja-mecab" when scoring Japanese...')
# concat_ref_files is a list of list of reference filenames, for example:
# concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]]
if args.test_set is None:
concat_ref_files = [args.refs]
else:
concat_ref_files = []
for test_set in args.test_set.split(','):
_, *ref_files = download_test_set(test_set, args.langpair)
if len(ref_files) == 0:
sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair))
concat_ref_files.append(ref_files)
inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) if args.input == '-' else smart_open(args.input, encoding=args.encoding)
full_system = inputfh.readlines()
# Read references
full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))]
for ref_files in concat_ref_files:
for refno, ref_file in enumerate(ref_files):
for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1):
if args.num_refs != 1:
splits = line.rstrip().split(sep='\t', maxsplit=args.num_refs-1)
if len(splits) != args.num_refs:
sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits)))
sys.exit(17)
for refno, split in enumerate(splits):
full_refs[refno].append(split)
else:
full_refs[refno].append(line)
# Filter sentences according to a given origlang
system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset)
if len(system) == 0:
message = 'Test set %s contains no sentence' % args.test_set
if args.origlang is not None or args.subset is not None:
message += ' with'
message += '' if args.origlang is None else ' origlang=' + args.origlang
message += '' if args.subset is None else ' subset=' + args.subset
sacrelogger.error(message)
exit(1)
# Handle sentence level and quit
if args.sentence_level:
for output, *references in zip(system, *refs):
results = []
for metric in args.metrics:
if metric == 'bleu':
bleu = sentence_bleu(output,
[[x] for x in references],
smooth_method=args.smooth,
smooth_value=args.smooth_value)
results.append(bleu)
if metric == 'chrf':
chrf = sentence_chrf(output,
references[0],
args.chrf_order,
args.chrf_beta,
remove_whitespace=not args.chrf_whitespace)
results.append(chrf)
display_metric(args.metrics, results, len(refs), args)
sys.exit(0)
# Else, handle system level
results = []
try:
for metric in args.metrics:
if metric == 'bleu':
bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)
results.append(bleu)
elif metric == 'chrf':
chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)
results.append(chrf)
except EOFError:
sacrelogger.error('The input and reference stream(s) were of different lengths.')
if args.test_set is not None:
sacrelogger.error('\nThis could be a problem with your system output or with sacreBLEU\'s reference database.\n'
'If the latter, you can clean out the references cache by typing:\n'
'\n'
' rm -r %s/%s\n'
'\n'
'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR,
args.test_set)
sys.exit(1)
display_metric(args.metrics, results, len(refs), args)
if args.detail:
width = args.width
sents_digits = len(str(len(full_system)))
origlangs = args.origlang if args.origlang else _available_origlangs(args.test_set, args.langpair)
for origlang in origlangs:
subsets = [None]
if args.subset is not None:
subsets += [args.subset]
elif all(t in SUBSETS for t in args.test_set.split(',')):
subsets += COUNTRIES + DOMAINS
for subset in subsets:
system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset)
if len(system) == 0:
continue
if subset in COUNTRIES:
subset_str = '%20s' % ('country=' + subset)
elif subset in DOMAINS:
subset_str = '%20s' % ('domain=' + subset)
else:
subset_str = '%20s' % ''
if 'bleu' in args.metrics:
bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)
print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, bleu.score, width+4, width))
if 'chrf' in args.metrics:
chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)
print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, chrf.score, width+4, width))
def display_metric(metrics_to_print, results, num_refs, args):
"""
Badly in need of refactoring.
One idea is to put all of this in the BLEU and CHRF classes, and then define
a Result::signature() function.
"""
for metric, result in zip(metrics_to_print, results):
if metric == 'bleu':
if args.score_only:
print('{0:.{1}f}'.format(result.score, args.width))
else:
version_str = bleu_signature(args, num_refs)
print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))
elif metric == 'chrf':
if args.score_only:
print('{0:.{1}f}'.format(result.score, args.width))
else:
version_str = chrf_signature(args, num_refs)
print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))
def parse_args():
arg_parser = argparse.ArgumentParser(
description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n'
'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n'
' cat output.detok.de | sacrebleu -t wmt14 -l en-de',
# epilog = 'Available test sets: ' + ','.join(sorted(DATASETS.keys(), reverse=True)),
formatter_class=argparse.RawDescriptionHelpFormatter)
arg_parser.add_argument('--test-set', '-t', type=str, default=None,
help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated')
arg_parser.add_argument('-lc', action='store_true', default=False,
help='Use case-insensitive BLEU (default: actual case)')
arg_parser.add_argument('--sentence-level', '-sl', action='store_true',
help='Output metric on each sentence.')
arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor', 'add-k', 'none'],
default='exp',
help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none')
arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None,
help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format(
SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))
arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None,
help='tokenization method to use')
arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,
help='source-target language pair (2-char ISO639-1 codes)')
arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,
help='use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation')
arg_parser.add_argument('--subset', dest='subset', default=None,
                            help='use a subset of sentences whose document annotation matches a given regex (see SUBSETS in the source code)')
arg_parser.add_argument('--download', type=str, default=None,
help='download a test set and quit')
arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None,
help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit')
arg_parser.add_argument('--input', '-i', type=str, default='-',
help='Read input from a file instead of STDIN')
arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,
help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.')
arg_parser.add_argument('refs', nargs='*', default=[],
help='optional list of references (for backwards-compatibility with older scripts)')
arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'], nargs='+',
default=['bleu'],
help='metrics to compute (default: bleu)')
arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,
help='chrf character order (default: %(default)s)')
arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,
help='chrf BETA parameter (default: %(default)s)')
arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False,
help='include whitespace in chrF calculation (default: %(default)s)')
    arg_parser.add_argument('--short', default=False, action='store_true',
                            help='produce a shorter (less human readable) signature')
    arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',
                            help='output only the metric score')
    arg_parser.add_argument('--force', default=False, action='store_true',
                            help='insist that your tokenized input is actually detokenized')
    arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',
                            help='suppress informative output')
    arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
                            help='open text files with specified encoding (default: %(default)s)')
    arg_parser.add_argument('--list', default=False, action='store_true',
                            help='print a list of all available test sets')
    arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',
                            help='dump the bibtex citation and quit')
    arg_parser.add_argument('--width', '-w', type=int, default=1,
                            help='floating point width (default: %(default)s)')
    arg_parser.add_argument('--detail', '-d', default=False, action='store_true',
                            help='print extra information (split test sets based on origlang)')
    arg_parser.add_argument('-V', '--version', action='version',
                            version='%(prog)s {}'.format(VERSION))
    args = arg_parser.parse_args()
    return args


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "8adcd75e925fe0c5a50b2fc7dc8c472a9610b4f2",
"index": 9575,
"step-1": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\n<mask token>\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', 
' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\n<mask token>\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more 
reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\n<mask token>\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. 
If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\n<mask token>\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\n<mask token>\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\n<mask token>\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment 
num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s 
{}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\n<mask token>\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', 
' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n outdir = os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(\n DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'],\n expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info('Downloading %s to %s', dataset, tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball,\n 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning(\n 'An SSL error was encountered in downloading the files. 
If you\\'re on a Mac, you may need to run the \"Install Certificates.command\" file located in the \"Python 3\" folder, often found under /Applications'\n )\n sys.exit(1)\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error(\n 'Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'\n .format(md5.hexdigest(), expected_md5))\n sacrelogger.error(\n 'Please manually delete \"{}\" and rerun the command.'\n .format(tarball))\n sacrelogger.error(\n 'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.'\n )\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.\n hexdigest()))\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n found = []\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n return found\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef sentence_bleu(hypothesis: str, references: List[str], smooth_method:\n str='floor', smooth_value: float=None, use_effective_order: bool=True\n ) ->BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' 
smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references, smooth_method=smooth_method,\n smooth_value=smooth_value, use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\n<mask token>\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if 
avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\n<mask token>\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\n<mask token>\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\n<mask token>\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , 
formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. 
Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) ->Counter:\n \"\"\"Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.\n\n :param line: A segment containing a sequence of words.\n :param min_order: Minimum n-gram length (default: 1).\n :param max_order: Maximum n-gram length (default: NGRAM_ORDER).\n :return: a dictionary containing ngrams and counts\n \"\"\"\n ngrams = Counter()\n tokens = line.split()\n for n in range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i:i + n])\n ngrams[ngram] += 1\n return ngrams\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = 
abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', ' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n outdir = os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(\n DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'],\n expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info('Downloading %s to %s', dataset, 
tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball,\n 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning(\n 'An SSL error was encountered in downloading the files. If you\\'re on a Mac, you may need to run the \"Install Certificates.command\" file located in the \"Python 3\" folder, often found under /Applications'\n )\n sys.exit(1)\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error(\n 'Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'\n .format(md5.hexdigest(), expected_md5))\n sacrelogger.error(\n 'Please manually delete \"{}\" and rerun the command.'\n .format(tarball))\n sacrelogger.error(\n 'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.'\n )\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.\n hexdigest()))\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n found = []\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n return found\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef sentence_bleu(hypothesis: str, references: List[str], smooth_method:\n str='floor', smooth_value: float=None, use_effective_order: bool=True\n ) ->BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: 
computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references, smooth_method=smooth_method,\n smooth_value=smooth_value, use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: 
str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\n<mask token>\n\n\ndef get_corpus_statistics(hypotheses: Iterable[str], references: Iterable[\n str], order: int=CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:\n corpus_statistics = [0] * (order * 3)\n for hypothesis, reference in zip(hypotheses, references):\n statistics = get_sentence_statistics(hypothesis, reference, order=\n order, remove_whitespace=remove_whitespace)\n for i in range(len(statistics)):\n corpus_statistics[i] += statistics[i]\n return corpus_statistics\n\n\n<mask token>\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\n<mask token>\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. 
\"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\n<mask token>\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\n<mask token>\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. 
Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
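A minimal usage sketch for the corpus-level BLEU entry point defined in the code above (corpus_bleu and the BLEU result object). This assumes the surrounding module is installed and importable as sacrebleu; the example segments are illustrative, not drawn from any test set.

import sacrebleu

# One system stream and two reference streams (each a list of segments,
# aligned one-to-one with the system output).
sys_stream = ['The dog bit the man.', 'It was not unexpected.']
ref_streams = [['The dog bit the man.', 'It was not unexpected.'],
               ['The dog had bit the man.', 'No one was surprised.']]

# Defaults visible in the signature above: '13a' tokenization, 'exp' smoothing.
bleu = sacrebleu.corpus_bleu(sys_stream, ref_streams)
print(bleu.score)     # 100-based BLEU score (float)
print(bleu.format())  # full line with n-gram precisions, BP, hyp/ref lengths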
"step-4": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) ->Counter:\n \"\"\"Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.\n\n :param line: A segment containing a sequence of words.\n :param min_order: Minimum n-gram length (default: 1).\n :param max_order: Maximum n-gram length (default: NGRAM_ORDER).\n :return: a dictionary containing ngrams and counts\n \"\"\"\n ngrams = Counter()\n tokens = line.split()\n for n in range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i:i + n])\n ngrams[ngram] += 1\n return ngrams\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = 
abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', ' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n outdir = os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(\n DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'],\n expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info('Downloading %s to %s', dataset, 
tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball,\n 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning(\n 'An SSL error was encountered in downloading the files. If you\\'re on a Mac, you may need to run the \"Install Certificates.command\" file located in the \"Python 3\" folder, often found under /Applications'\n )\n sys.exit(1)\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error(\n 'Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'\n .format(md5.hexdigest(), expected_md5))\n sacrelogger.error(\n 'Please manually delete \"{}\" and rerun the command.'\n .format(tarball))\n sacrelogger.error(\n 'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.'\n )\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.\n hexdigest()))\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n found = []\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n return found\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef sentence_bleu(hypothesis: str, references: List[str], smooth_method:\n str='floor', smooth_value: float=None, use_effective_order: bool=True\n ) ->BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: 
computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references, smooth_method=smooth_method,\n smooth_value=smooth_value, use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: 
str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\ndef get_sentence_statistics(hypothesis: str, reference: str, order: int=\n CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:\n hypothesis = delete_whitespace(hypothesis\n ) if remove_whitespace else hypothesis\n reference = delete_whitespace(reference\n ) if remove_whitespace else reference\n statistics = [0] * (order * 3)\n for i in range(order):\n n = i + 1\n hypothesis_ngrams = extract_char_ngrams(hypothesis, n)\n reference_ngrams = extract_char_ngrams(reference, n)\n common_ngrams = hypothesis_ngrams & reference_ngrams\n statistics[3 * i + 0] = sum(hypothesis_ngrams.values())\n statistics[3 * i + 1] = sum(reference_ngrams.values())\n statistics[3 * i + 2] = sum(common_ngrams.values())\n return statistics\n\n\ndef get_corpus_statistics(hypotheses: Iterable[str], references: Iterable[\n str], order: int=CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:\n corpus_statistics = [0] * (order * 3)\n for hypothesis, reference in zip(hypotheses, references):\n statistics = get_sentence_statistics(hypothesis, reference, order=\n order, remove_whitespace=remove_whitespace)\n for i in range(len(statistics)):\n corpus_statistics[i] += statistics[i]\n return corpus_statistics\n\n\ndef _avg_precision_and_recall(statistics: List[float], order: int) ->Tuple[\n float, float]:\n avg_precision = 0.0\n avg_recall = 0.0\n effective_order = 0\n for i in range(order):\n hypotheses_ngrams = statistics[3 * i + 0]\n references_ngrams = statistics[3 * i + 1]\n common_ngrams = statistics[3 * i + 2]\n if hypotheses_ngrams > 0 and references_ngrams > 0:\n avg_precision += common_ngrams / hypotheses_ngrams\n avg_recall += common_ngrams / references_ngrams\n effective_order += 1\n if effective_order == 0:\n return 0.0, 0.0\n avg_precision /= effective_order\n avg_recall /= effective_order\n return avg_precision, avg_recall\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef sentence_chrf(hypothesis: str, reference: str, order: int=CHRF_ORDER,\n beta: float=CHRF_BETA, remove_whitespace: bool=True) ->CHRF:\n \"\"\"\n Computes ChrF on a single sentence pair.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. 
If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n statistics = get_sentence_statistics(hypothesis, reference, order=order,\n remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\ndef _available_origlangs(test_sets, langpair):\n \"\"\"Return a list of origlang values in according to the raw SGM files.\"\"\"\n origlangs = set()\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if rawfile.endswith('.sgm'):\n with smart_open(rawfile) as fin:\n for line in fin:\n if line.startswith('<doc '):\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n origlangs.add(doc_origlang)\n return sorted(list(origlangs))\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\ndef main():\n args = parse_args()\n sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8',\n buffering=True, newline='\\n')\n sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8',\n buffering=True)\n if not args.quiet:\n logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s'\n )\n if args.download:\n download_test_set(args.download, args.langpair)\n sys.exit(0)\n if args.list:\n if args.test_set:\n print(' 
'.join(get_langpairs_for_testset(args.test_set)))\n else:\n print(get_a_list_of_testset_names())\n sys.exit(0)\n if args.sentence_level and len(args.metrics) > 1:\n sacrelogger.error(\n 'Only one metric can be used with Sentence-level reporting.')\n sys.exit(1)\n if args.citation:\n if not args.test_set:\n sacrelogger.error('I need a test set (-t).')\n sys.exit(1)\n for test_set in args.test_set.split(','):\n if 'citation' not in DATASETS[test_set]:\n sacrelogger.error('No citation found for %s', test_set)\n else:\n print(DATASETS[test_set]['citation'])\n sys.exit(0)\n if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1\n ):\n sacrelogger.error(\n 'The --num-refs argument allows you to provide any number of tab-delimited references in a single file.'\n )\n sacrelogger.error(\n 'You can only use it with externaly-provided references, however (i.e., not with `-t`),'\n )\n sacrelogger.error(\n 'and you cannot then provide multiple reference files.')\n sys.exit(1)\n if args.test_set is not None:\n for test_set in args.test_set.split(','):\n if test_set not in DATASETS:\n sacrelogger.error('Unknown test set \"%s\"\\n%s', test_set,\n get_a_list_of_testset_names())\n sys.exit(1)\n if args.test_set is None:\n if len(args.refs) == 0:\n sacrelogger.error(\n 'I need either a predefined test set (-t) or a list of references'\n )\n sacrelogger.error(get_a_list_of_testset_names())\n sys.exit(1)\n elif len(args.refs) > 0:\n sacrelogger.error(\n 'I need exactly one of (a) a predefined test set (-t) or (b) a list of references'\n )\n sys.exit(1)\n elif args.langpair is None:\n sacrelogger.error('I need a language pair (-l).')\n sys.exit(1)\n else:\n for test_set in args.test_set.split(','):\n if args.langpair not in DATASETS[test_set]:\n sacrelogger.error('No such language pair \"%s\"', args.langpair)\n sacrelogger.error(\n 'Available language pairs for test set \"%s\": %s',\n test_set, ', '.join(x for x in DATASETS[test_set].keys(\n ) if '-' in x))\n sys.exit(1)\n if args.echo:\n if args.langpair is None or args.test_set is None:\n sacrelogger.warning(\n '--echo requires a test set (--t) and a language pair (-l)')\n sys.exit(1)\n for test_set in args.test_set.split(','):\n print_test_set(test_set, args.langpair, args.echo, args.\n origlang, args.subset)\n sys.exit(0)\n if args.test_set is not None and args.tokenize == 'none':\n sacrelogger.warning(\n \"\"\"You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\nyour own reference tokenization. 
Published numbers will not be comparable with other papers.\n\"\"\"\n )\n if args.tokenize is None:\n if args.langpair is not None and args.langpair.split('-')[1] == 'zh':\n args.tokenize = 'zh'\n elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':\n args.tokenize = 'ja-mecab'\n else:\n args.tokenize = DEFAULT_TOKENIZER\n if args.langpair is not None and 'bleu' in args.metrics:\n if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':\n logger.warning(\n 'You should also pass \"--tok zh\" when scoring Chinese...')\n if args.langpair.split('-')[1\n ] == 'ja' and not args.tokenize.startswith('ja-'):\n logger.warning(\n 'You should also pass \"--tok ja-mecab\" when scoring Japanese...'\n )\n if args.test_set is None:\n concat_ref_files = [args.refs]\n else:\n concat_ref_files = []\n for test_set in args.test_set.split(','):\n _, *ref_files = download_test_set(test_set, args.langpair)\n if len(ref_files) == 0:\n sacrelogger.warning('No references found for test set {}/{}.'\n .format(test_set, args.langpair))\n concat_ref_files.append(ref_files)\n inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding\n ) if args.input == '-' else smart_open(args.input, encoding=args.\n encoding)\n full_system = inputfh.readlines()\n full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.\n num_refs))]\n for ref_files in concat_ref_files:\n for refno, ref_file in enumerate(ref_files):\n for lineno, line in enumerate(smart_open(ref_file, encoding=\n args.encoding), 1):\n if args.num_refs != 1:\n splits = line.rstrip().split(sep='\\t', maxsplit=args.\n num_refs - 1)\n if len(splits) != args.num_refs:\n sacrelogger.error(\n 'FATAL: line {}: expected {} fields, but found {}.'\n .format(lineno, args.num_refs, len(splits)))\n sys.exit(17)\n for refno, split in enumerate(splits):\n full_refs[refno].append(split)\n else:\n full_refs[refno].append(line)\n system, *refs = _filter_subset([full_system, *full_refs], args.test_set,\n args.langpair, args.origlang, args.subset)\n if len(system) == 0:\n message = 'Test set %s contains no sentence' % args.test_set\n if args.origlang is not None or args.subset is not None:\n message += ' with'\n message += ('' if args.origlang is None else ' origlang=' +\n args.origlang)\n message += '' if args.subset is None else ' subset=' + args.subset\n sacrelogger.error(message)\n exit(1)\n if args.sentence_level:\n for output, *references in zip(system, *refs):\n results = []\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = sentence_bleu(output, [[x] for x in references],\n smooth_method=args.smooth, smooth_value=args.\n smooth_value)\n results.append(bleu)\n if metric == 'chrf':\n chrf = sentence_chrf(output, references[0], args.\n chrf_order, args.chrf_beta, remove_whitespace=not\n args.chrf_whitespace)\n results.append(chrf)\n display_metric(args.metrics, results, len(refs), args)\n sys.exit(0)\n results = []\n try:\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = corpus_bleu(system, refs, smooth_method=args.smooth,\n smooth_value=args.smooth_value, force=args.force,\n lowercase=args.lc, tokenize=args.tokenize)\n results.append(bleu)\n elif metric == 'chrf':\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta,\n order=args.chrf_order, remove_whitespace=not args.\n chrf_whitespace)\n results.append(chrf)\n except EOFError:\n sacrelogger.error(\n 'The input and reference stream(s) were of different lengths.')\n if args.test_set is not None:\n sacrelogger.error(\n \"\"\"\nThis could be a problem with your 
system output or with sacreBLEU's reference database.\nIf the latter, you can clean out the references cache by typing:\n\n rm -r %s/%s\n\nThey will be downloaded automatically again the next time you run sacreBLEU.\"\"\"\n , SACREBLEU_DIR, args.test_set)\n sys.exit(1)\n display_metric(args.metrics, results, len(refs), args)\n if args.detail:\n width = args.width\n sents_digits = len(str(len(full_system)))\n origlangs = args.origlang if args.origlang else _available_origlangs(\n args.test_set, args.langpair)\n for origlang in origlangs:\n subsets = [None]\n if args.subset is not None:\n subsets += [args.subset]\n elif all(t in SUBSETS for t in args.test_set.split(',')):\n subsets += COUNTRIES + DOMAINS\n for subset in subsets:\n system, *refs = _filter_subset([full_system, *full_refs],\n args.test_set, args.langpair, origlang, subset)\n if len(system) == 0:\n continue\n if subset in COUNTRIES:\n subset_str = '%20s' % ('country=' + subset)\n elif subset in DOMAINS:\n subset_str = '%20s' % ('domain=' + subset)\n else:\n subset_str = '%20s' % ''\n if 'bleu' in args.metrics:\n bleu = corpus_bleu(system, refs, smooth_method=args.\n smooth, smooth_value=args.smooth_value, force=args.\n force, lowercase=args.lc, tokenize=args.tokenize)\n print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.\n format(origlang, subset_str, len(system),\n sents_digits, bleu.score, width + 4, width))\n if 'chrf' in args.metrics:\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta,\n order=args.chrf_order, remove_whitespace=not args.\n chrf_whitespace)\n print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.\n format(origlang, subset_str, len(system),\n sents_digits, chrf.score, width + 4, width))\n\n\ndef display_metric(metrics_to_print, results, num_refs, args):\n \"\"\"\n Badly in need of refactoring.\n One idea is to put all of this in the BLEU and CHRF classes, and then define\n a Result::signature() function.\n \"\"\"\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' +\n version_str))\n elif metric == 'chrf':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta,\n version_str, result.score, args.width))\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n 
None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
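A matching sketch for the chrF entry points (sentence_chrf and corpus_chrf) defined in the same code, under the same assumption that the module is importable as sacrebleu; the strings are made up.

import sacrebleu

hyp = 'The cat sat on the mat.'
ref = 'The cat was sitting on the mat.'

# Defaults visible above: character n-grams up to CHRF_ORDER=6, beta=CHRF_BETA=2,
# and whitespace stripped unless remove_whitespace=False is passed.
chrf = sacrebleu.sentence_chrf(hyp, ref)
print(chrf.score)

# Corpus-level variant over parallel iterables of hypotheses and references.
print(sacrebleu.corpus_chrf([hyp], [ref]).score)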
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not\n# use this file except in compliance with the License. A copy of the License\n# is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on\n# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"\nSacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.\nInspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.\nIt also knows all the standard test sets and handles downloading, processing, and tokenization for you.\n\nSee the [README.md] file for more information.\n\"\"\"\n\nimport argparse\nimport gzip\nimport hashlib\nimport io\nimport logging\nimport math\nimport os\nimport portalocker\nimport re\nimport sys\nimport ssl\nimport urllib.request\n\nfrom collections import Counter\nfrom itertools import zip_longest\nfrom typing import List, Iterable, Tuple, Union\nfrom .tokenizer import TOKENIZERS, TokenizeMeCab\nfrom .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS\nfrom . import __version__ as VERSION\n\nsacrelogger = logging.getLogger('sacrebleu')\n\ntry:\n # SIGPIPE is not available on Windows machines, throwing an exception.\n from signal import SIGPIPE\n\n # If SIGPIPE is available, change behaviour to default instead of ignore.\n from signal import signal, SIG_DFL\n signal(SIGPIPE, SIG_DFL)\n\nexcept ImportError:\n sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')\n\n# Where to store downloaded test sets.\n# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.\n#\n# Querying for a HOME environment variable can result in None (e.g., on Windows)\n# in which case the os.path.join() throws a TypeError. Using expanduser() is\n# a safe way to get the user's home folder.\nUSERHOME = os.path.expanduser(\"~\")\nSACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))\n\n# n-gram order. 
Don't change this.\nNGRAM_ORDER = 4\n\n# Default values for CHRF\nCHRF_ORDER = 6\n# default to 2 (per http://www.aclweb.org/anthology/W16-2341)\nCHRF_BETA = 2\n\n# The default floor value to use with `--smooth floor`\nSMOOTH_VALUE_DEFAULT = {'floor': 0.0, 'add-k': 1}\n\n\nDEFAULT_TOKENIZER = '13a'\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline=\"\\n\")\n return open(file, mode=mode, encoding=encoding, newline=\"\\n\")\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n\n # Abbreviations for the signature\n abbr = {\n 'test': 't',\n 'lang': 'l',\n 'smooth': 's',\n 'case': 'c',\n 'tok': 'tok',\n 'numrefs': '#',\n 'version': 'v',\n 'origlang': 'o',\n 'subset': 'S',\n }\n\n signature = {'tok': args.tokenize,\n 'version': VERSION,\n 'smooth': args.smooth,\n 'numrefs': numrefs,\n 'case': 'lc' if args.lc else 'mixed'}\n\n # For the Japanese tokenizer, add a dictionary type and its version to the signature.\n if args.tokenize == \"ja-mecab\":\n signature['tok'] += \"-\" + TokenizeMeCab().signature()\n\n if args.test_set is not None:\n signature['test'] = args.test_set\n\n if args.langpair is not None:\n signature['lang'] = args.langpair\n\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])\n\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n\n # Abbreviations for the signature\n abbr = {\n 'test': 't',\n 'lang': 'l',\n 'numchars': 'n',\n 'space': 's',\n 'case': 'c',\n 'numrefs': '#',\n 'version': 'v',\n 'origlang': 'o',\n 'subset': 'S',\n }\n\n signature = {'version': VERSION,\n 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order,\n 'numrefs': numrefs,\n 'case': 'lc' if args.lc else 'mixed'}\n\n if args.test_set is not None:\n signature['test'] = args.test_set\n\n if args.langpair is not None:\n signature['lang'] = args.langpair\n\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])\n\n return sigstr\n\n\ndef extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) -> Counter:\n \"\"\"Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.\n\n :param line: A segment containing a sequence of words.\n :param min_order: Minimum n-gram length (default: 1).\n :param max_order: Maximum n-gram length (default: NGRAM_ORDER).\n :return: a dictionary containing ngrams and counts\n \"\"\"\n\n ngrams = Counter()\n tokens = line.split()\n for n in 
range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i: i + n])\n ngrams[ngram] += 1\n\n return ngrams\n\n\ndef extract_char_ngrams(s: str, n: int) -> Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub(r'\\s+', ' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info(\"Processing %s to %s\", rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.xml'): # IWSLT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.txt'): # wmt17/ms\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'): # MTNT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n\n outdir = 
os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'], expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info(\"Downloading %s to %s\", dataset, tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball, 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning('An SSL error was encountered in downloading the files. If you\\'re on a Mac, '\n 'you may need to run the \"Install Certificates.command\" file located in the '\n '\"Python 3\" folder, often found under /Applications')\n sys.exit(1)\n\n # Check md5sum\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error('Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'.format(md5.hexdigest(), expected_md5))\n sacrelogger.error('Please manually delete \"{}\" and rerun the command.'.format(tarball))\n sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.hexdigest()))\n\n # Extract the tarball\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n\n found = []\n\n # Process the files into plain text\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None # used for TSV files\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n\n return found\n\n\nclass Result:\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n def __init__(self,\n score: float,\n counts,\n totals,\n precisions,\n bp,\n sys_len,\n ref_len):\n super().__init__(score)\n\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = \"/\".join([\"{:.1f}\".format(p) for p in self.precisions])\n return 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} 
ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'.format(\n score=self.score,\n width=width,\n precisions=precisions,\n bp=self.bp,\n ratio=self.sys_len / self.ref_len,\n sys_len=self.sys_len,\n ref_len=self.ref_len)\n\n\nclass CHRF(Result):\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\ndef compute_bleu(correct: List[int],\n total: List[int],\n sys_len: int,\n ref_len: int,\n smooth_method = 'none',\n smooth_value = None,\n use_effective_order = False) -> BLEU:\n \"\"\"Computes BLEU score from its sufficient statistics. Adds smoothing.\n\n Smoothing methods (citing \"A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU\",\n Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)\n\n - exp: NIST smoothing method (Method 3)\n - floor: Method 1\n - add-k: Method 2 (generalizing Lin and Och, 2004)\n - none: do nothing.\n\n :param correct: List of counts of correct ngrams, 1 <= n <= NGRAM_ORDER\n :param total: List of counts of total ngrams, 1 <= n <= NGRAM_ORDER\n :param sys_len: The cumulative system length\n :param ref_len: The cumulative reference length\n :param smooth: The smoothing method to use\n :param smooth_value: The smoothing value added, if smooth method 'floor' is used\n :param use_effective_order: If true, use the length of `correct` for the n-gram order instead of NGRAM_ORDER.\n :return: A BLEU object with the score (100-based) and other statistics.\n \"\"\"\n if smooth_method in SMOOTH_VALUE_DEFAULT and smooth_value is None:\n smooth_value = SMOOTH_VALUE_DEFAULT[smooth_method]\n\n precisions = [0 for x in range(NGRAM_ORDER)]\n\n smooth_mteval = 1.\n effective_order = NGRAM_ORDER\n for n in range(1, NGRAM_ORDER + 1):\n if smooth_method == 'add-k' and n > 1:\n correct[n-1] += smooth_value\n total[n-1] += smooth_value\n if total[n-1] == 0:\n break\n\n if use_effective_order:\n effective_order = n\n\n if correct[n-1] == 0:\n if smooth_method == 'exp':\n smooth_mteval *= 2\n precisions[n-1] = 100. / (smooth_mteval * total[n-1])\n elif smooth_method == 'floor':\n precisions[n-1] = 100. * smooth_value / total[n-1]\n else:\n precisions[n-1] = 100. * correct[n-1] / total[n-1]\n\n # If the system guesses no i-grams, 1 <= i <= NGRAM_ORDER, the BLEU score is 0 (technically undefined).\n # This is a problem for sentence-level BLEU or a corpus of short sentences, where systems will get no credit\n # if sentence lengths fall under the NGRAM_ORDER threshold. This fix scales NGRAM_ORDER to the observed\n # maximum order. 
It is only available through the API and off by default\n\n brevity_penalty = 1.0\n if sys_len < ref_len:\n brevity_penalty = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0\n\n score = brevity_penalty * math.exp(sum(map(my_log, precisions[:effective_order])) / effective_order)\n\n return BLEU(score, correct, total, precisions, brevity_penalty, sys_len, ref_len)\n\n\ndef sentence_bleu(hypothesis: str,\n references: List[str],\n smooth_method: str = 'floor',\n smooth_value: float = None,\n use_effective_order: bool = True) -> BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references,\n smooth_method=smooth_method,\n smooth_value=smooth_value,\n use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]],\n ref_streams: Union[str, List[Iterable[str]]],\n smooth_method='exp',\n smooth_value=None,\n force=False,\n lowercase=False,\n tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) -> BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n\n # Add some robustness to the input arguments\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n\n sys_len = 0\n ref_len = 0\n\n correct = [0 for n in range(NGRAM_ORDER)]\n total = [0 for n in range(NGRAM_ORDER)]\n\n # look for already-tokenized sentences\n tokenized_count = 0\n\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\"Source and reference streams have different lengths!\")\n\n if lowercase:\n lines = [x.lower() for x in lines]\n\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(' .'):\n tokenized_count += 1\n\n if tokenized_count == 100:\n sacrelogger.warning('That\\'s 100 lines that end in a tokenized period (\\'.\\')')\n sacrelogger.warning('It looks like you forgot to detokenize your test data, which may hurt your score.')\n sacrelogger.warning('If you insist your data is detokenized, or don\\'t care, you can suppress this message with \\'--force\\'.')\n\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n\n sys_len += len(output.split())\n ref_len += closest_len\n\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n-1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n-1] += sys_ngrams[ngram]\n\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=smooth_method, 
smooth_value=smooth_value, use_effective_order=use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream,\n ref_streams,\n smooth_value=None) -> BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor', smooth_value=smooth_value, force=True, tokenize='none', use_effective_order=True)\n\n\ndef delete_whitespace(text: str) -> str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub(r'\\s+', '', text).strip()\n\n\ndef get_sentence_statistics(hypothesis: str,\n reference: str,\n order: int = CHRF_ORDER,\n remove_whitespace: bool = True) -> List[float]:\n hypothesis = delete_whitespace(hypothesis) if remove_whitespace else hypothesis\n reference = delete_whitespace(reference) if remove_whitespace else reference\n statistics = [0] * (order * 3)\n for i in range(order):\n n = i + 1\n hypothesis_ngrams = extract_char_ngrams(hypothesis, n)\n reference_ngrams = extract_char_ngrams(reference, n)\n common_ngrams = hypothesis_ngrams & reference_ngrams\n statistics[3 * i + 0] = sum(hypothesis_ngrams.values())\n statistics[3 * i + 1] = sum(reference_ngrams.values())\n statistics[3 * i + 2] = sum(common_ngrams.values())\n return statistics\n\n\ndef get_corpus_statistics(hypotheses: Iterable[str],\n references: Iterable[str],\n order: int = CHRF_ORDER,\n remove_whitespace: bool = True) -> List[float]:\n corpus_statistics = [0] * (order * 3)\n for hypothesis, reference in zip(hypotheses, references):\n statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)\n for i in range(len(statistics)):\n corpus_statistics[i] += statistics[i]\n return corpus_statistics\n\n\ndef _avg_precision_and_recall(statistics: List[float], order: int) -> Tuple[float, float]:\n avg_precision = 0.0\n avg_recall = 0.0\n effective_order = 0\n for i in range(order):\n hypotheses_ngrams = statistics[3 * i + 0]\n references_ngrams = statistics[3 * i + 1]\n common_ngrams = statistics[3 * i + 2]\n if hypotheses_ngrams > 0 and references_ngrams > 0:\n avg_precision += common_ngrams / hypotheses_ngrams\n avg_recall += common_ngrams / references_ngrams\n effective_order += 1\n if effective_order == 0:\n return 0.0, 0.0\n avg_precision /= effective_order\n avg_recall /= effective_order\n return avg_precision, avg_recall\n\n\ndef _chrf(avg_precision, avg_recall, beta: int = CHRF_BETA) -> float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / ((beta_square * avg_precision) + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str],\n references: Iterable[str],\n order: int = CHRF_ORDER,\n beta: float = CHRF_BETA,\n remove_whitespace: bool = True) -> CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. 
If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order=order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics, order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef sentence_chrf(hypothesis: str,\n reference: str,\n order: int = CHRF_ORDER,\n beta: float = CHRF_BETA,\n remove_whitespace: bool = True) -> CHRF:\n \"\"\"\n Computes ChrF on a single sentence pair.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef get_langpairs_for_testset(testset: str) -> List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\w\\w\\-\\w\\w', x), DATASETS.get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() -> str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get('description', ''))\n return message\n\n\ndef _available_origlangs(test_sets, langpair):\n \"\"\"Return a list of origlang values in according to the raw SGM files.\"\"\"\n origlangs = set()\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])\n if rawfile.endswith('.sgm'):\n with smart_open(rawfile) as fin:\n for line in fin:\n if line.startswith('<doc '):\n doc_origlang = re.sub(r'.* origlang=\"([^\"]+)\".*\\n', '\\\\1', line)\n origlangs.add(doc_origlang)\n return sorted(list(origlangs))\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')\n\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception('--origlang and --subset supports only *.sgm files, not %s', rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception('No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub(r'.* origlang=\"([^\"]+)\".*\\n', '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub(r'.* docid=\"([^\"]+)\".*\\n', '\\\\1', line)\n if not re.search(subset, 
doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence,keep in zip(sys, indices_to_keep) if keep] for sys in systems]\n\n\ndef main():\n args = parse_args()\n\n # Explicitly set the encoding\n sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline=\"\\n\")\n sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True)\n\n if not args.quiet:\n logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s')\n\n if args.download:\n download_test_set(args.download, args.langpair)\n sys.exit(0)\n\n if args.list:\n if args.test_set:\n print(' '.join(get_langpairs_for_testset(args.test_set)))\n else:\n print(get_a_list_of_testset_names())\n sys.exit(0)\n\n if args.sentence_level and len(args.metrics) > 1:\n sacrelogger.error('Only one metric can be used with Sentence-level reporting.')\n sys.exit(1)\n\n if args.citation:\n if not args.test_set:\n sacrelogger.error('I need a test set (-t).')\n sys.exit(1)\n for test_set in args.test_set.split(','):\n if 'citation' not in DATASETS[test_set]:\n sacrelogger.error('No citation found for %s', test_set)\n else:\n print(DATASETS[test_set]['citation'])\n sys.exit(0)\n\n if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1):\n sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.')\n sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),')\n sacrelogger.error('and you cannot then provide multiple reference files.')\n sys.exit(1)\n\n if args.test_set is not None:\n for test_set in args.test_set.split(','):\n if test_set not in DATASETS:\n sacrelogger.error('Unknown test set \"%s\"\\n%s', test_set, get_a_list_of_testset_names())\n sys.exit(1)\n\n if args.test_set is None:\n if len(args.refs) == 0:\n sacrelogger.error('I need either a predefined test set (-t) or a list of references')\n sacrelogger.error(get_a_list_of_testset_names())\n sys.exit(1)\n elif len(args.refs) > 0:\n sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')\n sys.exit(1)\n elif args.langpair is None:\n sacrelogger.error('I need a language pair (-l).')\n sys.exit(1)\n else:\n for test_set in args.test_set.split(','):\n if args.langpair not in DATASETS[test_set]:\n sacrelogger.error('No such language pair \"%s\"', args.langpair)\n sacrelogger.error('Available language pairs for test set \"%s\": %s', test_set,\n ', '.join(x for x in DATASETS[test_set].keys() if '-' in x))\n sys.exit(1)\n\n if args.echo:\n if args.langpair is None or args.test_set is None:\n sacrelogger.warning(\"--echo requires a test set (--t) and a language pair (-l)\")\n sys.exit(1)\n for test_set in args.test_set.split(','):\n print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset)\n sys.exit(0)\n\n if args.test_set is not None and args.tokenize == 'none':\n sacrelogger.warning(\"You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\\n\"\n \"your own reference tokenization. Published numbers will not be comparable with other papers.\\n\")\n\n # Internal tokenizer settings. 
Set to 'zh' for Chinese DEFAULT_TOKENIZER (\n if args.tokenize is None:\n # set default\n if args.langpair is not None and args.langpair.split('-')[1] == 'zh':\n args.tokenize = 'zh'\n elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':\n args.tokenize = 'ja-mecab'\n else:\n args.tokenize = DEFAULT_TOKENIZER\n\n if args.langpair is not None and 'bleu' in args.metrics:\n if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':\n logger.warning('You should also pass \"--tok zh\" when scoring Chinese...')\n if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'):\n logger.warning('You should also pass \"--tok ja-mecab\" when scoring Japanese...')\n\n # concat_ref_files is a list of list of reference filenames, for example:\n # concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]]\n if args.test_set is None:\n concat_ref_files = [args.refs]\n else:\n concat_ref_files = []\n for test_set in args.test_set.split(','):\n _, *ref_files = download_test_set(test_set, args.langpair)\n if len(ref_files) == 0:\n sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair))\n concat_ref_files.append(ref_files)\n\n\n inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) if args.input == '-' else smart_open(args.input, encoding=args.encoding)\n full_system = inputfh.readlines()\n\n # Read references\n full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))]\n for ref_files in concat_ref_files:\n for refno, ref_file in enumerate(ref_files):\n for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1):\n if args.num_refs != 1:\n splits = line.rstrip().split(sep='\\t', maxsplit=args.num_refs-1)\n if len(splits) != args.num_refs:\n sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits)))\n sys.exit(17)\n for refno, split in enumerate(splits):\n full_refs[refno].append(split)\n else:\n full_refs[refno].append(line)\n\n # Filter sentences according to a given origlang\n system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset)\n if len(system) == 0:\n message = 'Test set %s contains no sentence' % args.test_set\n if args.origlang is not None or args.subset is not None:\n message += ' with'\n message += '' if args.origlang is None else ' origlang=' + args.origlang\n message += '' if args.subset is None else ' subset=' + args.subset\n sacrelogger.error(message)\n exit(1)\n\n # Handle sentence level and quit\n if args.sentence_level:\n for output, *references in zip(system, *refs):\n results = []\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = sentence_bleu(output,\n [[x] for x in references],\n smooth_method=args.smooth,\n smooth_value=args.smooth_value)\n results.append(bleu)\n if metric == 'chrf':\n chrf = sentence_chrf(output,\n references[0],\n args.chrf_order,\n args.chrf_beta,\n remove_whitespace=not args.chrf_whitespace)\n results.append(chrf)\n\n display_metric(args.metrics, results, len(refs), args)\n\n sys.exit(0)\n\n # Else, handle system level\n results = []\n try:\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)\n results.append(bleu)\n elif metric == 'chrf':\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, 
remove_whitespace=not args.chrf_whitespace)\n results.append(chrf)\n except EOFError:\n sacrelogger.error('The input and reference stream(s) were of different lengths.')\n if args.test_set is not None:\n sacrelogger.error('\\nThis could be a problem with your system output or with sacreBLEU\\'s reference database.\\n'\n 'If the latter, you can clean out the references cache by typing:\\n'\n '\\n'\n ' rm -r %s/%s\\n'\n '\\n'\n 'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR,\n args.test_set)\n sys.exit(1)\n\n display_metric(args.metrics, results, len(refs), args)\n\n if args.detail:\n width = args.width\n sents_digits = len(str(len(full_system)))\n origlangs = args.origlang if args.origlang else _available_origlangs(args.test_set, args.langpair)\n for origlang in origlangs:\n subsets = [None]\n if args.subset is not None:\n subsets += [args.subset]\n elif all(t in SUBSETS for t in args.test_set.split(',')):\n subsets += COUNTRIES + DOMAINS\n for subset in subsets:\n system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset)\n if len(system) == 0:\n continue\n if subset in COUNTRIES:\n subset_str = '%20s' % ('country=' + subset)\n elif subset in DOMAINS:\n subset_str = '%20s' % ('domain=' + subset)\n else:\n subset_str = '%20s' % ''\n if 'bleu' in args.metrics:\n bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)\n print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, bleu.score, width+4, width))\n if 'chrf' in args.metrics:\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)\n print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, chrf.score, width+4, width))\n\n\ndef display_metric(metrics_to_print, results, num_refs, args):\n \"\"\"\n Badly in need of refactoring.\n One idea is to put all of this in the BLEU and CHRF classes, and then define\n a Result::signature() function.\n \"\"\"\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))\n\n elif metric == 'chrf':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(\n description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\\n'\n 'Quick usage: score your detokenized output against WMT\\'14 EN-DE:\\n'\n ' cat output.detok.de | sacrebleu -t wmt14 -l en-de',\n # epilog = 'Available test sets: ' + ','.join(sorted(DATASETS.keys(), reverse=True)),\n formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated')\n arg_parser.add_argument('-lc', action='store_true', default=False,\n help='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output 
metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor', 'add-k', 'none'],\n default='exp',\n help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none')\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None,\n help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format(\n SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None,\n help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,\n help='source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,\n help='use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation')\n arg_parser.add_argument('--subset', dest='subset', default=None,\n help='use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)')\n arg_parser.add_argument('--download', type=str, default=None,\n help='download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None,\n help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit')\n arg_parser.add_argument('--input', '-i', type=str, default='-',\n help='Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,\n help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.')\n arg_parser.add_argument('refs', nargs='*', default=[],\n help='optional list of references (for backwards-compatibility with older scripts)')\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'], nargs='+',\n default=['bleu'],\n help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False,\n help='include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',\n help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',\n help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',\n help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1,\n help='floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, 
action='store_true',\n help='print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version',\n version='%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
27,
29,
31,
37,
42
]
}
|
[
27,
29,
31,
37,
42
] |
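The record above reproduces sacrebleu's BLEU/chrF internals. As a quick orientation, here is a minimal usage sketch of the corpus_bleu and sentence_chrf entry points defined in that code (the hypothesis and reference strings are made up; it assumes the module is importable as sacrebleu):

# Minimal scoring sketch (hypothetical strings)
from sacrebleu import corpus_bleu, sentence_chrf

hyps = ['the cat sat on the mat']           # system stream: one segment
refs = [['the cat is on the mat']]          # one reference stream
bleu = corpus_bleu(hyps, refs)              # 'exp' smoothing, default tokenizer
print(bleu.score)                           # 100-based BLEU score
chrf = sentence_chrf(hyps[0], refs[0][0])   # character n-gram F-score
print(chrf.score)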
from django.shortcuts import render  # redirect was unused and has been dropped
from . import download_function
from django.http import HttpResponse
# Create your views here.
def download(request):
if request.method == "GET":
session = request.GET['session']
title = request.GET['download_title']
download_quality = request.GET['download_quality']
        file_url = download_function.download_generator(session, download_quality, title)
        return HttpResponse(file_url)
    # A non-GET request previously fell through and returned None, which Django rejects
    return HttpResponse(status=405)
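# A rough sketch of exercising this view with Django's test client. The URL
# '/download/' is an assumption (it depends on the project's URLconf); the
# query parameter names come from the view above:
#
#     from django.test import Client
#     response = Client().get('/download/', {
#         'session': 'abc123',            # hypothetical session id
#         'download_title': 'My Video',
#         'download_quality': '720p',
#     })
#     assert response.status_code == 200  # body holds the generated file URL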
|
normal
|
{
"blob_id": "339506777f5471ec99b39c67c28df8ec3d06ce19",
"index": 3084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download(request):\n if request.method == 'GET':\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n file_url = download_function.download_generator(session,\n download_quality, title)\n return HttpResponse(file_url)\n",
"step-3": "from django.shortcuts import render, redirect\nfrom . import download_function\nfrom django.http import HttpResponse\n\n\ndef download(request):\n if request.method == 'GET':\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n file_url = download_function.download_generator(session,\n download_quality, title)\n return HttpResponse(file_url)\n",
"step-4": "from django.shortcuts import render,redirect\nfrom . import download_function\nfrom django.http import HttpResponse\n# Create your views here.\ndef download(request):\n if request.method == \"GET\":\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n\n file_url = download_function.download_generator(session,download_quality,title)\n return HttpResponse(file_url)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Processing:
<|reserved_special_token_0|>
@property
def vocab_size(self):
return self.__vocab_size
def normalize(self, s):
s = s.lower()
replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',
'u'), ('ñ', 'n')
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
def split_punt(self, x):
words = WordPunctTokenizer().tokenize(x)
x = str(' '.join(words))
x = re.sub(' +', ' ', x)
return x
def delete_stop_words(self, x):
x = x.translate(str.maketrans('', '', string.punctuation))
x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))
words = x.split(' ')
words = [word for word in words if word not in self.stop_words]
x = str(' '.join(words))
return x
def stem_sentence(self, sentence):
stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(
sentence)]
return ' '.join(stemmed_text)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def eval_cell(cell):
try:
cell_array = eval(cell)
except:
cell_array = []
return cell_array
def get_actors(self, cast):
eval_cast = self.eval_cell(cast)
if len(eval_cast) > 2:
up = 3
else:
up = len(eval_cast)
actors = ''
for i in range(0, up):
actor = eval_cast[i]['name']
actor = self.normalize(actor.replace(' ', '_').lower())
actors = actors + ' ' + actor
return actors
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def split_data(self, data):
overviews = data['overview'].values
y = data['like'].values
overviews_train, overviews_test, y_train, y_test = train_test_split(
overviews, y, test_size=0.15, stratify=y, random_state=9)
return overviews_train, overviews_test, y_train, y_test
def fit_tokenizer(self, overviews_train, num_words):
self.tokenizer = Tokenizer(num_words)
self.tokenizer.fit_on_texts(overviews_train)
        # vocab_size is a read-only property, so write the name-mangled backing field
        self.__vocab_size = len(self.tokenizer.word_index) + 1
def tokenize_overview(self, overviews, max_len):
X = self.tokenizer.texts_to_sequences(overviews)
from keras.preprocessing.sequence import pad_sequences
X = pad_sequences(X, padding='pre', maxlen=max_len)
return X
def process(self, data, train_dev):
df = self.clean_overview(data)
df = self.paste_cast(df)
if train_dev:
X_train, X_test, y_train, y_test = self.split_data(df)
self.fit_tokenizer(X_train, self.n_words)
X_train = self.tokenize_overview(X_train, self.max_len)
X_test = self.tokenize_overview(X_test, self.max_len)
return X_train, X_test
else:
X = df['overview'].values
X = self.tokenize_overview(X, self.max_len)
return X
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Processing:
def __init__(self, stopwords_path='data/', tokenizer_path='models/',
max_len=80):
stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',
header=None)
stop_words = stop_words[0].tolist() + ['secuela']
self.stop_words = stop_words
self.n_words = 8000
self.max_len = max_len
try:
self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)
except:
nltk.download('popular')
self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)
with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:
self.tokenizer = pickle.load(handle)
self.__vocab_size = len(self.tokenizer.word_index) + 1
@property
def vocab_size(self):
return self.__vocab_size
def normalize(self, s):
s = s.lower()
replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',
'u'), ('ñ', 'n')
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
def split_punt(self, x):
words = WordPunctTokenizer().tokenize(x)
x = str(' '.join(words))
x = re.sub(' +', ' ', x)
return x
def delete_stop_words(self, x):
x = x.translate(str.maketrans('', '', string.punctuation))
x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))
words = x.split(' ')
words = [word for word in words if word not in self.stop_words]
x = str(' '.join(words))
return x
def stem_sentence(self, sentence):
stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(
sentence)]
return ' '.join(stemmed_text)
<|reserved_special_token_0|>
def clean_overview(self, df):
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.
delete_stop_words(x))
df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))
df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +
' ' + x['overview'], axis=1)
df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +
x['overview'], axis=1)
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.
delete_stop_words(x))
return df
@staticmethod
def eval_cell(cell):
try:
cell_array = eval(cell)
except:
cell_array = []
return cell_array
def get_actors(self, cast):
eval_cast = self.eval_cell(cast)
if len(eval_cast) > 2:
up = 3
else:
up = len(eval_cast)
actors = ''
for i in range(0, up):
actor = eval_cast[i]['name']
actor = self.normalize(actor.replace(' ', '_').lower())
actors = actors + ' ' + actor
return actors
def get_director(self, crew):
eval_crew = self.eval_cell(crew)
directors = [member['name'] for member in eval_crew if member['job'
] == 'Director']
directors = [self.normalize(director.replace(' ', '_').lower()) for
director in directors]
directors = str(' '.join(directors))
return directors
def paste_cast(self, data):
data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +
' ' + x['overview'], axis=1)
data['overview'] = data.apply(lambda x: self.get_director(x['crew']
) + x['overview'], axis=1)
return data
def split_data(self, data):
overviews = data['overview'].values
y = data['like'].values
overviews_train, overviews_test, y_train, y_test = train_test_split(
overviews, y, test_size=0.15, stratify=y, random_state=9)
return overviews_train, overviews_test, y_train, y_test
def fit_tokenizer(self, overviews_train, num_words):
self.tokenizer = Tokenizer(num_words)
self.tokenizer.fit_on_texts(overviews_train)
        # vocab_size is a read-only property, so write the name-mangled backing field
        self.__vocab_size = len(self.tokenizer.word_index) + 1
def tokenize_overview(self, overviews, max_len):
X = self.tokenizer.texts_to_sequences(overviews)
from keras.preprocessing.sequence import pad_sequences
X = pad_sequences(X, padding='pre', maxlen=max_len)
return X
    def process(self, data, train_dev):
        # clean_overview already appends cast and director to each overview;
        # the extra paste_cast call that followed duplicated them and was removed
        df = self.clean_overview(data)
if train_dev:
X_train, X_test, y_train, y_test = self.split_data(df)
self.fit_tokenizer(X_train, self.n_words)
X_train = self.tokenize_overview(X_train, self.max_len)
X_test = self.tokenize_overview(X_test, self.max_len)
return X_train, X_test
else:
X = df['overview'].values
X = self.tokenize_overview(X, self.max_len)
return X
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Processing:
def __init__(self, stopwords_path='data/', tokenizer_path='models/',
max_len=80):
stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',
header=None)
stop_words = stop_words[0].tolist() + ['secuela']
self.stop_words = stop_words
self.n_words = 8000
self.max_len = max_len
try:
self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)
except:
nltk.download('popular')
self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)
with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:
self.tokenizer = pickle.load(handle)
self.__vocab_size = len(self.tokenizer.word_index) + 1
@property
def vocab_size(self):
return self.__vocab_size
def normalize(self, s):
s = s.lower()
replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',
'u'), ('ñ', 'n')
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
def split_punt(self, x):
words = WordPunctTokenizer().tokenize(x)
x = str(' '.join(words))
x = re.sub(' +', ' ', x)
return x
def delete_stop_words(self, x):
x = x.translate(str.maketrans('', '', string.punctuation))
x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))
words = x.split(' ')
words = [word for word in words if word not in self.stop_words]
x = str(' '.join(words))
return x
def stem_sentence(self, sentence):
stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(
sentence)]
return ' '.join(stemmed_text)
def augment(self, x):
try:
return self.aug.augment(x)
except:
return None
def clean_overview(self, df):
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.
delete_stop_words(x))
df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))
df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +
' ' + x['overview'], axis=1)
df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +
x['overview'], axis=1)
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.
delete_stop_words(x))
return df
@staticmethod
def eval_cell(cell):
try:
cell_array = eval(cell)
except:
cell_array = []
return cell_array
def get_actors(self, cast):
eval_cast = self.eval_cell(cast)
if len(eval_cast) > 2:
up = 3
else:
up = len(eval_cast)
actors = ''
for i in range(0, up):
actor = eval_cast[i]['name']
actor = self.normalize(actor.replace(' ', '_').lower())
actors = actors + ' ' + actor
return actors
def get_director(self, crew):
eval_crew = self.eval_cell(crew)
directors = [member['name'] for member in eval_crew if member['job'
] == 'Director']
directors = [self.normalize(director.replace(' ', '_').lower()) for
director in directors]
directors = str(' '.join(directors))
return directors
def paste_cast(self, data):
data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +
' ' + x['overview'], axis=1)
data['overview'] = data.apply(lambda x: self.get_director(x['crew']
) + x['overview'], axis=1)
return data
def split_data(self, data):
overviews = data['overview'].values
y = data['like'].values
overviews_train, overviews_test, y_train, y_test = train_test_split(
overviews, y, test_size=0.15, stratify=y, random_state=9)
return overviews_train, overviews_test, y_train, y_test
def fit_tokenizer(self, overviews_train, num_words):
self.tokenizer = Tokenizer(num_words)
self.tokenizer.fit_on_texts(overviews_train)
        # vocab_size is a read-only property, so write the name-mangled backing field
        self.__vocab_size = len(self.tokenizer.word_index) + 1
def tokenize_overview(self, overviews, max_len):
X = self.tokenizer.texts_to_sequences(overviews)
from keras.preprocessing.sequence import pad_sequences
X = pad_sequences(X, padding='pre', maxlen=max_len)
return X
    def process(self, data, train_dev):
        # clean_overview already appends cast and director to each overview;
        # the extra paste_cast call that followed duplicated them and was removed
        df = self.clean_overview(data)
if train_dev:
X_train, X_test, y_train, y_test = self.split_data(df)
self.fit_tokenizer(X_train, self.n_words)
X_train = self.tokenize_overview(X_train, self.max_len)
X_test = self.tokenize_overview(X_test, self.max_len)
return X_train, X_test
else:
X = df['overview'].values
X = self.tokenize_overview(X, self.max_len)
return X
<|reserved_special_token_1|>
import string
import pandas as pd
import nltk
from nltk import word_tokenize
from nltk.stem import SnowballStemmer
from nltk.tokenize import WordPunctTokenizer
import json
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
import pickle
import re
import nlpaug.augmenter.word as naw
import nlpaug.flow as naf
class Processing:
def __init__(self, stopwords_path='data/', tokenizer_path='models/',
max_len=80):
stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',
header=None)
stop_words = stop_words[0].tolist() + ['secuela']
self.stop_words = stop_words
self.n_words = 8000
self.max_len = max_len
try:
self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)
except:
nltk.download('popular')
self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)
with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:
self.tokenizer = pickle.load(handle)
self.__vocab_size = len(self.tokenizer.word_index) + 1
@property
def vocab_size(self):
return self.__vocab_size
def normalize(self, s):
s = s.lower()
replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',
'u'), ('ñ', 'n')
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
def split_punt(self, x):
words = WordPunctTokenizer().tokenize(x)
x = str(' '.join(words))
x = re.sub(' +', ' ', x)
return x
def delete_stop_words(self, x):
x = x.translate(str.maketrans('', '', string.punctuation))
x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))
words = x.split(' ')
words = [word for word in words if word not in self.stop_words]
x = str(' '.join(words))
return x
def stem_sentence(self, sentence):
stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(
sentence)]
return ' '.join(stemmed_text)
def augment(self, x):
try:
return self.aug.augment(x)
except:
return None
def clean_overview(self, df):
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.
delete_stop_words(x))
df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))
df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +
' ' + x['overview'], axis=1)
df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +
x['overview'], axis=1)
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.
delete_stop_words(x))
return df
@staticmethod
def eval_cell(cell):
try:
cell_array = eval(cell)
except:
cell_array = []
return cell_array
def get_actors(self, cast):
eval_cast = self.eval_cell(cast)
if len(eval_cast) > 2:
up = 3
else:
up = len(eval_cast)
actors = ''
for i in range(0, up):
actor = eval_cast[i]['name']
actor = self.normalize(actor.replace(' ', '_').lower())
actors = actors + ' ' + actor
return actors
def get_director(self, crew):
eval_crew = self.eval_cell(crew)
directors = [member['name'] for member in eval_crew if member['job'
] == 'Director']
directors = [self.normalize(director.replace(' ', '_').lower()) for
director in directors]
directors = str(' '.join(directors))
return directors
def paste_cast(self, data):
data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +
' ' + x['overview'], axis=1)
data['overview'] = data.apply(lambda x: self.get_director(x['crew']
) + x['overview'], axis=1)
return data
def split_data(self, data):
overviews = data['overview'].values
y = data['like'].values
overviews_train, overviews_test, y_train, y_test = train_test_split(
overviews, y, test_size=0.15, stratify=y, random_state=9)
return overviews_train, overviews_test, y_train, y_test
def fit_tokenizer(self, overviews_train, num_words):
self.tokenizer = Tokenizer(num_words)
self.tokenizer.fit_on_texts(overviews_train)
        # vocab_size is a read-only property, so write the name-mangled backing field
        self.__vocab_size = len(self.tokenizer.word_index) + 1
def tokenize_overview(self, overviews, max_len):
X = self.tokenizer.texts_to_sequences(overviews)
from keras.preprocessing.sequence import pad_sequences
X = pad_sequences(X, padding='pre', maxlen=max_len)
return X
    def process(self, data, train_dev):
        # clean_overview already appends cast and director to each overview;
        # the extra paste_cast call that followed duplicated them and was removed
        df = self.clean_overview(data)
if train_dev:
X_train, X_test, y_train, y_test = self.split_data(df)
self.fit_tokenizer(X_train, self.n_words)
X_train = self.tokenize_overview(X_train, self.max_len)
X_test = self.tokenize_overview(X_test, self.max_len)
return X_train, X_test
else:
X = df['overview'].values
X = self.tokenize_overview(X, self.max_len)
return X
<|reserved_special_token_1|>
import string
import pandas as pd
import nltk
from nltk import word_tokenize
from nltk.stem import SnowballStemmer
from nltk.tokenize import WordPunctTokenizer
import json
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
import pickle
import re
import nlpaug.augmenter.word as naw
import nlpaug.flow as naf
class Processing:
def __init__(self, stopwords_path='data/', tokenizer_path='models/', max_len=80):
# It needs a stopwords file to init
stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt', header=None)
stop_words = stop_words[0].tolist() + ['secuela']
self.stop_words = stop_words
self.n_words = 8000
self.max_len = max_len
# self.aug = naf.Sequential([
# naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action="insert", aug_p=0.1),
# naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action="substitute", aug_p=0.9),
# naw.RandomWordAug(action="delete", aug_p=0.1)
# ])
try:
self.stemmer = SnowballStemmer("spanish", ignore_stopwords=True)
except:
nltk.download("popular")
self.stemmer = SnowballStemmer("spanish", ignore_stopwords=True)
# loading
with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:
self.tokenizer = pickle.load(handle)
self.__vocab_size = len(self.tokenizer.word_index) + 1
@property
def vocab_size(self):
return self.__vocab_size
def normalize(self, s):
s = s.lower()
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ñ", "n")
)
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
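    # Illustrative example: normalize('Canción Española') returns 'cancion espanola',
    # so accented vowels and 'ñ' are folded before tokens are compared downstream.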
def split_punt(self, x):
words = WordPunctTokenizer().tokenize(x)
x = str(' '.join(words))
x = re.sub(' +', ' ', x)
return x
def delete_stop_words(self, x):
x = x.translate(str.maketrans('', '', string.punctuation))
x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))
words = x.split(' ')
words = [word for word in words if word not in self.stop_words]
x = str(' '.join(words))
return x
def stem_sentence(self, sentence):
# Stem the sentence
stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(sentence)]
return " ".join(stemmed_text)
    def augment(self, x):
        # self.aug is commented out in __init__, so this currently always hits
        # the except branch and returns None
        try:
            return self.aug.augment(x)
        except Exception:
            return None
def clean_overview(self, df):
# Execute the full cleaning process into every overview
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))
df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))
df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)
df['overview'] = df.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))
return df
    # Helpers that pull cast/crew members out of the raw credit cells and paste them into the overview
@staticmethod
def eval_cell(cell):
        try:
            cell_array = eval(cell)  # NOTE: ast.literal_eval would be safer for untrusted strings
        except Exception:
            cell_array = []
        return cell_array
def get_actors(self, cast):
eval_cast = self.eval_cell(cast)
if len(eval_cast) > 2:
up = 3
else:
up = len(eval_cast)
actors = ''
for i in range(0, up):
actor = eval_cast[i]['name']
actor = self.normalize(actor.replace(' ', '_').lower())
actors = actors + ' ' + actor
return actors
def get_director(self, crew):
eval_crew = self.eval_cell(crew)
directors = [member['name'] for member in eval_crew if member['job'] == 'Director']
directors = [self.normalize(director.replace(' ', '_').lower()) for director in directors]
directors = str(' '.join(directors))
return directors
def paste_cast(self, data):
data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)
data['overview'] = data.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)
return data
# Split train_test
def split_data(self, data):
overviews = data['overview'].values
y = data['like'].values
overviews_train, overviews_test, y_train, y_test = train_test_split(overviews, y, test_size=0.15, stratify=y,
random_state=9)
return overviews_train, overviews_test, y_train, y_test
def fit_tokenizer(self, overviews_train, num_words):
self.tokenizer = Tokenizer(num_words)
self.tokenizer.fit_on_texts(overviews_train)
# Adding 1 because of reserved 0 index
        # vocab_size is a read-only property, so update its name-mangled backing field
        self.__vocab_size = len(self.tokenizer.word_index) + 1
def tokenize_overview(self, overviews, max_len):
X = self.tokenizer.texts_to_sequences(overviews)
# print(len(max(X, key=len)))
from keras.preprocessing.sequence import pad_sequences
# We pad the sentence for the left to fit with max_len
X = pad_sequences(X, padding='pre', maxlen=max_len)
# print(X[1])
return X
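    # Shape sketch: for overviews = ['pelicul divert'] and max_len=5 this returns
    # one row of length 5, left-padded with zeros, e.g. [0, 0, 0, 17, 204]
    # (the integer ids are hypothetical; they depend on the fitted tokenizer).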
    def process(self, data, train_dev):
        # clean_overview already appends cast and director to each overview;
        # the extra paste_cast call that followed duplicated them and was removed.
        # Note: the train branch below computes y_train/y_test but does not return
        # them, so callers that need labels must call split_data themselves.
        df = self.clean_overview(data)
if train_dev:
X_train, X_test, y_train, y_test = self.split_data(df)
self.fit_tokenizer(X_train, self.n_words)
X_train = self.tokenize_overview(X_train, self.max_len)
X_test = self.tokenize_overview(X_test, self.max_len)
return X_train, X_test
else:
X = df['overview'].values
X = self.tokenize_overview(X, self.max_len)
return X
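# A minimal end-to-end sketch of the class above. The paths and the DataFrame
# layout are assumptions: it expects data/stopwords-es.txt, models/tokenizer.pickle,
# and a frame with 'overview', 'cast', 'crew' and, for training, 'like' columns:
#
#     proc = Processing(stopwords_path='data/', tokenizer_path='models/')
#     X = proc.process(df, train_dev=False)        # (n_rows, 80) padded id matrix
#     X_train, X_test = proc.process(df, train_dev=True)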
|
flexible
|
{
"blob_id": "326b2dcbef339aeb196bef23debad75fa079b121",
"index": 6435,
"step-1": "<mask token>\n\n\nclass Processing:\n <mask token>\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n <mask token>\n <mask token>\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n <mask token>\n <mask token>\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-2": "<mask token>\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/',\n max_len=80):\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',\n header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n try:\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n except:\n nltk.download('popular')\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n <mask token>\n\n def clean_overview(self, df):\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +\n x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n return df\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n\n def get_director(self, crew):\n eval_crew = self.eval_cell(crew)\n directors = [member['name'] for member in eval_crew if member['job'\n ] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for\n director in directors]\n directors = str(' '.join(directors))\n return directors\n\n def paste_cast(self, data):\n data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']\n ) + x['overview'], axis=1)\n return data\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n 
self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-3": "<mask token>\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/',\n max_len=80):\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',\n header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n try:\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n except:\n nltk.download('popular')\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n\n def augment(self, x):\n try:\n return self.aug.augment(x)\n except:\n return None\n\n def clean_overview(self, df):\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +\n x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n return df\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n\n def get_director(self, crew):\n eval_crew = self.eval_cell(crew)\n directors = [member['name'] for member in eval_crew if member['job'\n ] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for\n director in directors]\n directors = str(' '.join(directors))\n return directors\n\n def paste_cast(self, data):\n data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']\n ) + x['overview'], axis=1)\n return data\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, 
overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-4": "import string\nimport pandas as pd\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.stem import SnowballStemmer\nfrom nltk.tokenize import WordPunctTokenizer\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import Tokenizer\nimport pickle\nimport re\nimport nlpaug.augmenter.word as naw\nimport nlpaug.flow as naf\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/',\n max_len=80):\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',\n header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n try:\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n except:\n nltk.download('popular')\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n\n def augment(self, x):\n try:\n return self.aug.augment(x)\n except:\n return None\n\n def clean_overview(self, df):\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +\n x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n return df\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n\n def get_director(self, crew):\n eval_crew = self.eval_cell(crew)\n directors = [member['name'] for member in eval_crew if member['job'\n ] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for\n director in directors]\n directors = str(' '.join(directors))\n return directors\n\n def paste_cast(self, data):\n data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']\n ) + 
x['overview'], axis=1)\n return data\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-5": "import string\nimport pandas as pd\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.stem import SnowballStemmer\nfrom nltk.tokenize import WordPunctTokenizer\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import Tokenizer\nimport pickle\nimport re\nimport nlpaug.augmenter.word as naw\nimport nlpaug.flow as naf\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/', max_len=80):\n # It needs a stopwords file to init\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt', header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n # self.aug = naf.Sequential([\n # naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action=\"insert\", aug_p=0.1),\n # naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action=\"substitute\", aug_p=0.9),\n # naw.RandomWordAug(action=\"delete\", aug_p=0.1)\n # ])\n\n try:\n self.stemmer = SnowballStemmer(\"spanish\", ignore_stopwords=True)\n except:\n nltk.download(\"popular\")\n self.stemmer = SnowballStemmer(\"spanish\", ignore_stopwords=True)\n\n # loading\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = (\n (\"á\", \"a\"),\n (\"é\", \"e\"),\n (\"í\", \"i\"),\n (\"ó\", \"o\"),\n (\"ú\", \"u\"),\n (\"ñ\", \"n\")\n )\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n\n return x\n\n def stem_sentence(self, sentence):\n # Stem the sentence\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(sentence)]\n\n return \" \".join(stemmed_text)\n\n def augment(self, x):\n try:\n return self.aug.augment(x)\n except:\n return None\n\n def clean_overview(self, df):\n # Execute the full cleaning process into every overview\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)\n df['overview'] = df.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))\n\n return df\n\n # Get staff and paste to overview\n @staticmethod\n def eval_cell(cell):\n\n try:\n\n cell_array = eval(cell)\n\n except:\n\n cell_array = []\n\n return cell_array\n\n def get_actors(self, cast):\n\n eval_cast = self.eval_cell(cast)\n\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n\n actors = ''\n\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n\n actors = actors + ' ' + actor\n\n 
return actors\n\n def get_director(self, crew):\n\n eval_crew = self.eval_cell(crew)\n\n directors = [member['name'] for member in eval_crew if member['job'] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for director in directors]\n directors = str(' '.join(directors))\n\n return directors\n\n def paste_cast(self, data):\n\n data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)\n\n return data\n\n # Split train_test\n def split_data(self, data):\n\n overviews = data['overview'].values\n y = data['like'].values\n\n overviews_train, overviews_test, y_train, y_test = train_test_split(overviews, y, test_size=0.15, stratify=y,\n random_state=9)\n\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n # Adding 1 because of reserved 0 index\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n\n X = self.tokenizer.texts_to_sequences(overviews)\n # print(len(max(X, key=len)))\n from keras.preprocessing.sequence import pad_sequences\n\n # We pad the sentence for the left to fit with max_len\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n # print(X[1])\n\n return X\n\n def process(self, data, train_dev):\n\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n\n if train_dev:\n\n X_train, X_test, y_train, y_test = self.split_data(df)\n\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n\n return X_train, X_test\n\n else:\n\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n\n return X\n\n\n",
"step-ids": [
12,
16,
17,
18,
19
]
}
|
[
12,
16,
17,
18,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(name='django-themes', version=__version__, packages=['django_themes'],
include_package_data=True, license='MIT License', description=
'Admin extensions to make theming django sites easier for end users of django sites'
, long_description=README, url=
'https://github.com/LegoStormtroopr/django-themes/', author=
'Samuel Spencer', author_email='sam@aristotlemetadata.com', classifiers
=['Environment :: Web Environment', 'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'], keywords=
'django themes', install_requires=['django'])
<|reserved_special_token_1|>
import os
from setuptools import setup
from django_spaghetti import __version__
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(name='django-themes', version=__version__, packages=['django_themes'],
include_package_data=True, license='MIT License', description=
'Admin extensions to make theming django sites easier for end users of django sites'
, long_description=README, url=
'https://github.com/LegoStormtroopr/django-themes/', author=
'Samuel Spencer', author_email='sam@aristotlemetadata.com', classifiers
=['Environment :: Web Environment', 'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'], keywords=
'django themes', install_requires=['django'])
<|reserved_special_token_1|>
import os
from setuptools import setup
from django_spaghetti import __version__
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-themes',
version=__version__,
packages=['django_themes'],
include_package_data=True,
license='MIT License',
description='Admin extensions to make theming django sites easier for end users of django sites',
long_description=README,
url='https://github.com/LegoStormtroopr/django-themes/',
author='Samuel Spencer',
author_email='sam@aristotlemetadata.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords='django themes',
install_requires=['django'], # I mean obviously you'll have django installed if you want to use this.
)
|
flexible
|
{
"blob_id": "6e557c2b85031a0038afd6a9987e3417b926218f",
"index": 6184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\nsetup(name='django-themes', version=__version__, packages=['django_themes'],\n include_package_data=True, license='MIT License', description=\n 'Admin extensions to make theming django sites easier for end users of django sites'\n , long_description=README, url=\n 'https://github.com/LegoStormtroopr/django-themes/', author=\n 'Samuel Spencer', author_email='sam@aristotlemetadata.com', classifiers\n =['Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'], keywords=\n 'django themes', install_requires=['django'])\n",
"step-3": "import os\nfrom setuptools import setup\nfrom django_spaghetti import __version__\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\nsetup(name='django-themes', version=__version__, packages=['django_themes'],\n include_package_data=True, license='MIT License', description=\n 'Admin extensions to make theming django sites easier for end users of django sites'\n , long_description=README, url=\n 'https://github.com/LegoStormtroopr/django-themes/', author=\n 'Samuel Spencer', author_email='sam@aristotlemetadata.com', classifiers\n =['Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'], keywords=\n 'django themes', install_requires=['django'])\n",
"step-4": "import os\nfrom setuptools import setup\nfrom django_spaghetti import __version__\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='django-themes',\n version=__version__,\n packages=['django_themes'],\n include_package_data=True,\n license='MIT License',\n description='Admin extensions to make theming django sites easier for end users of django sites',\n long_description=README,\n url='https://github.com/LegoStormtroopr/django-themes/',\n author='Samuel Spencer',\n author_email='sam@aristotlemetadata.com',\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n # Replace these appropriately if you are stuck on Python 2.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n keywords='django themes',\n install_requires=['django'], # I mean obviously you'll have django installed if you want to use this.\n\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
## Line detection with the Hough transform
# cv2.HoughLines(image, rho, theta, threshold, lines=None, srn=None, stn=None, min_theta=None, max_theta=None) => lines
# image : input edge image (e.g. the output of Canny)
# rho : spacing of rho values in the accumulator array (usually 1.0)
# theta : spacing of theta values in the accumulator array (usually np.pi/180)

# Larger rho/theta steps shrink the accumulator array; smaller steps enlarge it.
# A large accumulator can represent lines precisely but costs more computation;
# a small accumulator is faster but cannot represent lines as precisely.

# threshold : accumulator count above which a line is accepted
#             (lower it to detect more lines, raise it to detect fewer)

# lines : returned as a 3-D numpy.ndarray holding (rho, theta) values.
# Only rho and theta are actually needed, but the C++-to-Python binding
# adds a useless extra axis, so the shape is (N, 1, 2), dtype = numpy.float32 -- mind the shape.
# The middle 1 carries no meaning, so index it away with [0] in code.

# Raw (rho, theta) values are hard for us to interpret directly.
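# For reference, a minimal sketch of drawing cv2.HoughLines output: each (rho, theta)
# pair must first be converted into two far-apart points before cv2.line can draw it
# (the probabilistic variant used below avoids this by returning segment endpoints
# directly). Kept commented out; it assumes the `edges`/`dst` images defined further down.
# import math
# h_lines = cv2.HoughLines(edges, 1, np.pi / 180.0, 250)
# if h_lines is not None:
#     for rho, theta in h_lines[:, 0]:            # [:, 0] drops the dummy middle axis
#         a, b = math.cos(theta), math.sin(theta)
#         x0, y0 = a * rho, b * rho               # foot of the perpendicular from the origin
#         pt1 = (int(x0 - 1000 * b), int(y0 + 1000 * a))
#         pt2 = (int(x0 + 1000 * b), int(y0 - 1000 * a))
#         cv2.line(dst, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)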
## Probabilistic Hough transform
# cv2.HoughLinesP(image, rho, theta, threshold, lines=None, minLineLength=None, maxLineGap=None)
# image : input edge image (e.g. the output of Canny)
# rho : spacing of rho values in the accumulator array (usually 1.0)
# theta : spacing of theta values in the accumulator array (usually np.pi/180)
# threshold : accumulator count above which a line is accepted
#             (lower it to detect more lines, raise it to detect fewer)
# lines : numpy.ndarray holding the start and end coordinates (x1, y1, x2, y2) of each segment
#         shape=(N, 1, 4), dtype = numpy.int32
# minLineLength : minimum segment length to detect (shorter candidates are discarded)
# maxLineGap : maximum gap between edge points still treated as one line; default 0.
#              With the default 0, an edge with a gap like `_ _` is not seen as one line,
#              whereas with a value of 4, `__ _ __ ___` with small gaps still counts as a single line.
import sys, cv2, numpy as np
# src = cv2.imread('./images/bd.png', cv2.IMREAD_GRAYSCALE)
src = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
print('Image load failed')
sys.exit()
edges = cv2.Canny(src, 50, 150)
lines = cv2.HoughLinesP(edges, 1, np.pi/180.0, 150, minLineLength=50, maxLineGap=5) # raise threshold -> fewer lines, lower -> more
# We draw the segments in colour, so convert the edge map to BGR (Canny() output is grayscale).
dst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
if lines is not None:
    for i in range(lines.shape[0]): # N segments detected; N is not known in advance
        pt1 = (lines[i][0][0], lines[i][0][1]) # start point; the middle index is always 0
        pt2 = (lines[i][0][2], lines[i][0][3]) # end point; the middle index is always 0
cv2.line(dst, pt1, pt2, (0,255,0), 2, cv2.LINE_AA)
cv2.imshow('src',src)
cv2.imshow('edges',edges)
cv2.imshow('dst',dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "ff7cb8261f3abb70599725fe7c598c571d037226",
"index": 9535,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif src is None:\n print('Image load failed')\n sys.exit()\n<mask token>\nif lines is not None:\n for i in range(lines.shape[0]):\n pt1 = lines[i][0][0], lines[i][0][1]\n pt2 = lines[i][0][2], lines[i][0][3]\n cv2.line(dst, pt1, pt2, (0, 255, 0), 2, cv2.LINE_AA)\ncv2.imshow('src', src)\ncv2.imshow('edges', edges)\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nsrc = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)\nif src is None:\n print('Image load failed')\n sys.exit()\nedges = cv2.Canny(src, 50, 150)\nlines = cv2.HoughLinesP(edges, 1, np.pi / 180.0, 150, minLineLength=50,\n maxLineGap=5)\ndst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\nif lines is not None:\n for i in range(lines.shape[0]):\n pt1 = lines[i][0][0], lines[i][0][1]\n pt2 = lines[i][0][2], lines[i][0][3]\n cv2.line(dst, pt1, pt2, (0, 255, 0), 2, cv2.LINE_AA)\ncv2.imshow('src', src)\ncv2.imshow('edges', edges)\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-4": "import sys, cv2, numpy as np\nsrc = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)\nif src is None:\n print('Image load failed')\n sys.exit()\nedges = cv2.Canny(src, 50, 150)\nlines = cv2.HoughLinesP(edges, 1, np.pi / 180.0, 150, minLineLength=50,\n maxLineGap=5)\ndst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\nif lines is not None:\n for i in range(lines.shape[0]):\n pt1 = lines[i][0][0], lines[i][0][1]\n pt2 = lines[i][0][2], lines[i][0][3]\n cv2.line(dst, pt1, pt2, (0, 255, 0), 2, cv2.LINE_AA)\ncv2.imshow('src', src)\ncv2.imshow('edges', edges)\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-5": "## 허프변환에 의한 직선 검출\r\n# cv2.HoughLines(image, rho, theta, threshold, lines=None, srn=None, stn=None, min-theta=None, max-theta=None) => lines\r\n# image : 에지 입력 영상(Canny 연산을 이용한 에지 영상)\r\n# rho(로우) : 축적 배열에서 rho 값의 간격(보통 1.0 사용)\r\n# theta(세타) : 축적 배열에서 theta 값의 간격(보통 np.pi/180)\r\n\r\n# rho, theta 값이 커지면 축적배열의 크기는 작아지고, 값이 작으면 축적배열은 커진다.\r\n# 축적배열이 크면 정교한 직선을 표현할 수 있으나, 연산량이 많아진다.\r\n# 축적배열이 작아면 정밀한 직선을 표현할 수 없으나, 연산량이 적어 속도는 빠르다.\r\n\r\n# threshold : 축적배열에서 직선으로 판단할 임계값(임계값을 낮추면 많은 직선 검출, 반대로 높이면 검출되는 직선은 줄어든다.\r\n\r\n# lines : rho, theta 값을 담고 있는 3차원 행렬(numpy.ndarray) 형태로 리턴된다.\r\n# rho, theta를 행렬로 표현한다고 하면 rho, theta 2개만 있으면 되는데\r\n# c++에서 파이썬으로 넘어오면서 쓸데없는 값이 추가되었다.\r\n# lines 의 shape은 (N, 1, 2), dtype = numpy.float32 **shape 주의할 것\r\n# 가운데 1이 의미없는 값. 그래서 나중에 코드화할 때 [0]을 집어넣으면 된다.\r\n\r\n# rho, theta값은 우리가 알아보기 힘들다.\r\n## 확률적 허프 변환\r\n# cv2.HoughLinesP(image, rho, theta, threshold, lines=None, minLineLength=None, maxLineGap=None)\r\n# image : 에지 입력 영상(Canny 연산을 이용한 에지 영상)\r\n# rho(로우) : 축적 배열에서 rho 값의 간격(보통 1.0 사용)\r\n# theta(세타) : 축적 배열에서 theta 값의 간격(보통 np.pi/180)\r\n# threshold : 축적배열에서 직선으로 판단할 임계값(임계값을 낮추면 많은 직선 검출, 반대로 높이면 검출되는 직선은 줄어든다.\r\n\r\n# lines : 선분의 시작과 끝 좌표(x1, y1, x2, y2) 정보를 담고 있는 numpy.ndarray\r\n# shape=(N, 1, 4), dtype = numpy.int32\r\n\r\n# minLineLength : 검출하기 위한 선분의 최소 길이. (최소길이에 못미치면 검출X)\r\n# maxLineGap : 직선으로 간주하기 위한 최대 에지 점 간격. 기본값 0\r\n# 기본값이 0일 때는, _ _ 이렇게 에지에 간격이 있으면 하나의 직선으로 보지 않고,\r\n# 이 값을 4로 줬을 때는, __ _ __ ___ 이렇게 간격이 3개 있어도 하나의 직선으로 본다.\r\n\r\nimport sys, cv2, numpy as np\r\n\r\n# src = cv2.imread('./images/bd.png', cv2.IMREAD_GRAYSCALE)\r\nsrc = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)\r\nif src is None:\r\n print('Image load failed')\r\n sys.exit()\r\n\r\nedges = cv2.Canny(src, 50, 150)\r\n\r\nlines = cv2.HoughLinesP(edges, 1, np.pi/180.0, 150, minLineLength=50, maxLineGap=5) # threshold값 ↑적게검출 ↓많이검출 \r\n\r\n# 색을 칠해서 선분을 표현할 거니까 해당 edge를 BGR로 바꿔줘야함. Canny()하면 grayscale됨.\r\ndst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\r\n\r\nif lines is not None:\r\n for i in range(lines.shape[0]): # N개 검출됨. N의 값은 알 수 없다.\r\n pt1 = (lines[i][0][0], lines[i][0][1]) # 시작점 좌표, 가운데 값은 무조건 0으로\r\n pt2 = (lines[i][0][2], lines[i][0][3]) # 끝점 좌표, 가운데 값은 무조건 0으로\r\n\r\n cv2.line(dst, pt1, pt2, (0,255,0), 2, cv2.LINE_AA)\r\n\r\n\r\ncv2.imshow('src',src)\r\ncv2.imshow('edges',edges)\r\ncv2.imshow('dst',dst)\r\ncv2.waitKey()\r\n\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_should_config_start_correctly():
c = Config(mock)
assert c._entities == mock['entities']
assert c._synonimous == mock['synonimous']
assert c.templates == mock['templates']
assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [
'fizzfuzz', 'fuzz']}, 'templates': [{'text':
'{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}
def test_should_config_start_correctly():
c = Config(mock)
assert c._entities == mock['entities']
assert c._synonimous == mock['synonimous']
assert c.templates == mock['templates']
assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']
<|reserved_special_token_1|>
from src.config import Config
mock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [
'fizzfuzz', 'fuzz']}, 'templates': [{'text':
'{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}
def test_should_config_start_correctly():
c = Config(mock)
assert c._entities == mock['entities']
assert c._synonimous == mock['synonimous']
assert c.templates == mock['templates']
assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']
<|reserved_special_token_1|>
from src.config import Config
mock = {
"entities": {
"foo": [ "bar", "foobar" ]
},
"synonimous": {
"fizz": [ "fizzfuzz", "fuzz"]
},
"templates": [
{
"text": "{synonimous.fizz} and {entities.foo}",
"intention": "fizzfoo"
}
]
}
def test_should_config_start_correctly():
c = Config(mock)
assert c._entities == mock['entities']
assert c._synonimous == mock['synonimous']
assert c.templates == mock['templates']
assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']
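
# A minimal Config sketch that would satisfy the assertions above -- hypothetical,
# shown only for context; the real src.config.Config may differ. Kept commented out
# so it does not shadow the imported class.
# class Config:
#     def __init__(self, data):
#         self._entities = data['entities']
#         self._synonimous = data['synonimous']
#         self.templates = data['templates']
#
#     def get_value(self, group, key):
#         # e.g. get_value('synonimous', 'fizz') -> ['fizzfuzz', 'fuzz']
#         return getattr(self, '_' + group)[key]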
|
flexible
|
{
"blob_id": "987f8ce668f2002b731822fa5f3de143a80aaafe",
"index": 9807,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n",
"step-3": "<mask token>\nmock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [\n 'fizzfuzz', 'fuzz']}, 'templates': [{'text':\n '{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n",
"step-4": "from src.config import Config\nmock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [\n 'fizzfuzz', 'fuzz']}, 'templates': [{'text':\n '{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n",
"step-5": "from src.config import Config\n\nmock = {\n \"entities\": {\n \"foo\": [ \"bar\", \"foobar\" ]\n },\n \"synonimous\": {\n \"fizz\": [ \"fizzfuzz\", \"fuzz\"]\n },\n \"templates\": [\n {\n \"text\": \"{synonimous.fizz} and {entities.foo}\",\n \"intention\": \"fizzfoo\"\n }\n ]\n}\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import unittest
from common.ReqLogin import req
import os
import yaml
from common import util
from TestCase.runnerBase import TestInterfaceCase
import paramunittest
import HTMLTestRunner  # generates the HTML report used in the __main__ block

# The test methods below call the request helper through the name `Login`;
# alias it to the imported `req` class so those calls resolve.
Login = req
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
def getYam(homeyaml):
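    """Load and return the parsed YAML at homeyaml; returns None if the file is missing."""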
try:
with open(homeyaml, encoding='utf-8') as f:
            x = yaml.safe_load(f)
return x
except FileNotFoundError:
print(u"找不到文件")
x = getYam(PATH("./case_user_api.yml"))
class UserinfoTest(TestInterfaceCase):
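    """API checks for the user endpoints, driven by cases loaded from case_user_api.yml."""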
def setUp(self):
login = req.reqData(req)
self.infoma = {}
self.response = ""
self.infoma["id"] = x["testinfo"][0]["id"]
self.infoma["module"] = x["testinfo"][0]["module"]
self.infoma["intr"] = x["testinfo"][0]["intr"]
def base_check(self):
baseCheck = x["basecheck"]
if self.response["c"] == baseCheck["c"] and self.response["m"] == baseCheck["m"]:
return True
else:
util.DATA["fail"] = util.DATA["fail"] + 1
self.infoma["result"] = "失败"
self.infoma["reason"] = "接口未正确返回"
return False
    def detailCheck_list(self, case):
        if self.base_check() is True:
            if "list" in self.response:
                util.DATA["pass"] = util.DATA["pass"] + 1
                self.infoma["result"] = "passed"
            else:
                util.DATA["fail"] = util.DATA["fail"] + 1
                self.infoma["result"] = "failed"
                self.infoma["reason"] = self.response["c"]
        self.infoma["casename"] = case["casename"]
        util.DATA["sum"] = util.DATA["sum"] + 1
        util.INFO.append(self.infoma)
    def detailCheck_id(self, case):
        if self.base_check() is True:
            if self.response["r"]["id"] == case["data"]["id"]:
                util.DATA["pass"] = util.DATA["pass"] + 1
                self.infoma["result"] = "passed"
            else:
                util.DATA["fail"] = util.DATA["fail"] + 1
                self.infoma["result"] = "failed"
                self.infoma["reason"] = "assertion: expected value does not match actual"
        self.infoma["casename"] = case["casename"]
        util.DATA["sum"] = util.DATA["sum"] + 1
        util.INFO.append(self.infoma)
    '''Positive test'''
    def test_user_info_conrrect(self):
        case1 = x["userinfo"]["case1"]
        self.response = Login.req(Login, case1["api"], case1["data"])
        self.detailCheck_id(case1)
    #
    # '''Negative test -- value field too short'''
    # def test_user_info_poorvalue(self):
    #     case2 = x["userinfo"]["case2"]
    #     self.response = Login.req(Login, case2["api"], case2["data"])
    #     if self.base_check() is True:
    #         if self.response["r"]["id"] != case2["data"]["id"]:
    #             util.DATA["pass"] = util.DATA["pass"] + 1
    #             self.infoma["result"] = "passed"
    #         else:
    #             util.DATA["fail"] = util.DATA["fail"] + 1
    #             self.infoma["result"] = "failed"
    #             self.infoma["reason"] = "assertion: expected value does not match actual"
    #     self.infoma["casename"] = case2["casename"]
    #     util.DATA["sum"] = util.DATA["sum"] + 1
    #     util.INFO.append(self.infoma)
    # '''Negative test -- required API parameter empty'''
    # def test_user_info_poorkey(self):
    #     case3 = x["userinfo"]["case3"]
    #     self.response = Login.req(Login, case3["api"], case3["data"])
    #     if self.base_check() is False:
    #         if self.response["massage"] == case3["massage"]:
    #             util.DATA["pass"] = util.DATA["pass"] + 1
    #             self.infoma["result"] = "passed"
    #         else:
    #             util.DATA["fail"] = util.DATA["fail"] + 1
    #             self.infoma["result"] = "failed"
    #             self.infoma["reason"] = "assertion: expected value does not match actual"
    #     self.infoma["casename"] = case3["casename"]
    #     util.DATA["sum"] = util.DATA["sum"] + 1
    #     util.INFO.append(self.infoma)
    def test_user_item_conrrect(self):
        case1 = x["useritems"]["case1"]
        self.response = Login.req(Login, case1["api"], case1["data"])
        self.detailCheck_list(case1)

    def test_user_projectboards(self):
        case1 = x["userprojectboards"]["case1"]
        self.response = Login.req(Login, case1["api"], case1["data"])
        self.detailCheck_list(case1)

    def test_me_info(self):
        case1 = x["me"]["case1"]
        self.response = Login.req(Login, case1["api"], case1["data"])
        self.base_check()  # base_check takes no argument; it only validates the shared response code/message
        self.infoma["casename"] = case1["casename"]
        util.DATA["sum"] = util.DATA["sum"] + 1
        util.INFO.append(self.infoma)

    def test_me_orders(self):
        case1 = x["me"]["case2"]
        self.response = Login.req(Login, case1["api"], case1["data"])
        self.detailCheck_list(case1)

    def tearDown(self):
        quit = Login.req(Login, 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')
if __name__ == '__main__':
    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(UserinfoTest))
    # tests = ['test_user_info_conrrect', 'test_user_info_poorvalue', 'test_user_info_poorkey']
    # suite.addTests(map(UserinfoTest, tests))
    # suite.addTest(UserItemsTest("test_user_item_conrrect"))

    filename = r'C:\Users\xp\Desktop\result.html'
    fp = open(filename, 'wb')
    runner = HTMLTestRunner.HTMLTestRunner(
        stream=fp,
        title=u'Automated test report',
        description=u'Registration - automated test report')
    runner.run(suite)
|
normal
|
{
"blob_id": "aea196566bbbe9d37bf03b9b17a4062659a27bb6",
"index": 1446,
"step-1": "<mask token>\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n <mask token>\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n <mask token>\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n <mask token>\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_me_info(self):\n case1 = x['me']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.base_check(case1)\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getYam(homeyaml):\n try:\n with open(homeyaml, encoding='utf-8') as f:\n x = yaml.load(f)\n return x\n except FileNotFoundError:\n print(u'找不到文件')\n\n\n<mask token>\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n \"\"\"正常测试\"\"\"\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_me_info(self):\n case1 = x['me']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.base_check(case1)\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\n<mask token>\n",
"step-4": "<mask token>\nPATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n\n\ndef getYam(homeyaml):\n try:\n with open(homeyaml, encoding='utf-8') as f:\n x = yaml.load(f)\n return x\n except FileNotFoundError:\n print(u'找不到文件')\n\n\nx = getYam(PATH('./case_user_api.yml'))\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n \"\"\"正常测试\"\"\"\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_me_info(self):\n case1 = x['me']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.base_check(case1)\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n filename = 'C:\\\\Users\\\\xp\\\\Desktop\\\\result.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'自动化测试报告',\n description=u'注册- -自动化测试报告')\n runner.run(suite)\n",
"step-5": "import unittest\nfrom common.ReqLogin import req\nimport os\nimport yaml\nfrom common import util\nfrom TestCase.runnerBase import TestInterfaceCase\nimport paramunittest\n\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\ndef getYam(homeyaml):\n try:\n with open(homeyaml, encoding='utf-8') as f:\n x = yaml.load(f)\n return x\n except FileNotFoundError:\n print(u\"找不到文件\")\nx = getYam(PATH(\"./case_user_api.yml\"))\n\nclass UserinfoTest(TestInterfaceCase):\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = \"\"\n self.infoma[\"id\"] = x[\"testinfo\"][0][\"id\"]\n self.infoma[\"module\"] = x[\"testinfo\"][0][\"module\"]\n self.infoma[\"intr\"] = x[\"testinfo\"][0][\"intr\"]\n\n def base_check(self):\n baseCheck = x[\"basecheck\"]\n if self.response[\"c\"] == baseCheck[\"c\"] and self.response[\"m\"] == baseCheck[\"m\"]:\n return True\n else:\n util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n self.infoma[\"result\"] = \"失败\"\n self.infoma[\"reason\"] = \"接口未正确返回\"\n return False\n\n def detailCkeck_list(self,case):\n if self.base_check() is True:\n if \"list\" in self.response:\n util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n self.infoma[\"result\"] = \"通过\"\n else:\n util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n self.infoma[\"result\"] = \"失败\"\n self.infoma[\"reason\"] = self.response[\"c\"]\n self.infoma[\"casename\"] = case[\"casename\"]\n util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self,case):\n if self.base_check() is True:\n if self.response[\"r\"][\"id\"] == case[\"data\"][\"id\"]:\n util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n self.infoma[\"result\"] = \"通过\"\n else:\n util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n self.infoma[\"result\"] = \"失败\"\n self.infoma[\"reason\"] = \"断言预期与实际不符\"\n self.infoma[\"casename\"] = case[\"casename\"]\n util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n util.INFO.append(self.infoma)\n\n\n '''正常测试'''\n def test_user_info_conrrect(self):\n case1 = x[\"userinfo\"][\"case1\"]\n self.response = Login.req(Login,case1[\"api\"],case1[\"data\"])\n self.detailCheck_id(case1)\n #\n # '''异常测试--value字段长度不够'''\n # def test_user_info_poorvalue(self):\n # case2 = x[\"userinfo\"][\"case2\"]\n # self.response = Login.req(Login, case2[\"api\"], case2[\"data\"])\n # if self.check1() is True:\n # if self.response[\"r\"][\"id\"] != case2[\"data\"][\"id\"]:\n # util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n # self.infoma[\"result\"] = \"通过\"\n # else:\n # util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n # self.infoma[\"result\"] = \"失败\"\n # self.infoma[\"reason\"] = \"断言预期与实际不符\"\n # self.infoma[\"casename\"] = case2[\"casename\"]\n # util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n # util.INFO.append(self.infoma)\n # '''异常测试--接口所需参数为空'''\n # def test_user_info_poorkey(self):\n # case3 = x[\"userinfo\"][\"case3\"]\n # self.response = Login.req(Login,case3[\"api\"],case3[\"data\"])\n # if self.check1() is False:\n # if self.response[\"massage\"] == case3[\"massage\"]:\n # util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n # self.infoma[\"result\"] = \"通过\"\n # else:\n # util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n # self.infoma[\"result\"] = \"失败\"\n # self.infoma[\"reason\"] = \"断言预期与实际不符\"\n # self.infoma[\"casename\"] = case3[\"casename\"]\n # util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n # util.INFO.append(self.infoma)\n\n def test_user_item_conrrect(self):\n case1 = x[\"useritems\"][\"case1\"]\n self.response = Login.req(Login, 
case1[\"api\"], case1[\"data\"])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x[\"userprojectboards\"][\"case1\"]\n self.response = Login.req(Login, case1[\"api\"], case1[\"data\"])\n self.detailCkeck_list(case1)\n def test_me_info(self):\n case1 = x[\"me\"][\"case1\"]\n self.response = Login.req(Login, case1[\"api\"], case1[\"data\"])\n self.base_check(case1)\n def test_me_orders(self):\n case1 = x[\"me\"][\"case2\"]\n self.response = Login.req(Login, case1[\"api\"], case1[\"data\"])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,'http://192.168.4.15:8001/api/0.2/account/signout',datas='')\n\nif __name__ =='__main__':\n suite = unittest.TestSuite()\n # tests = ['test_user_info_conrrect','test_user_info_poorvalue','test_user_info_poorkey']\n # suite.addTests(map(UserinfoTest,tests))\n # suite.addTest(UserItemsTest(\"test_user_item_conrrect\"))\n\n filename = r'C:\\Users\\xp\\Desktop\\result.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner.HTMLTestRunner(\n stream=fp,\n title=u'自动化测试报告',\n description=u'注册- -自动化测试报告')\n runner.run(suite)\n",
"step-ids": [
10,
11,
13,
15,
17
]
}
|
[
10,
11,
13,
15,
17
] |
from haven import haven_utils as hu
import itertools, copy
EXP_GROUPS = {}
EXP_GROUPS['starter_issam'] = hu.cartesian_exp_group({
'batch_size': 32,
'opt': {'name': 'adamW', 'lr': 0.0001, 'wd': 1e-6},
'model': {'name': 'resnext50_32x4d_ssl'},
'loss_func': {'name': 'cross_entropy'},
'max_epoch': [50]
})
EXP_GROUPS['clip'] = hu.cartesian_exp_group({
'batch_size': 32,
'model': {'name': 'clip'},
'max_epoch': [30],
})
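
# A rough sketch of what hu.cartesian_exp_group is assumed to do (per haven-ai's
# docs): list-valued fields are expanded into a cartesian grid, yielding one
# experiment dict per combination. Illustrative only -- the real helper does more
# (e.g. hashing configs); scalar and dict values are treated as fixed.
def _cartesian_sketch(conf):
    keys = list(conf)
    values = [v if isinstance(v, list) else [v] for v in conf.values()]
    return [dict(zip(keys, combo)) for combo in itertools.product(*values)]

# _cartesian_sketch({'lr': [0.1, 0.01], 'bs': 32})
# -> [{'lr': 0.1, 'bs': 32}, {'lr': 0.01, 'bs': 32}]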
|
normal
|
{
"blob_id": "dafefc65335a0d7e27057f51b43e52b286f5bc6b",
"index": 6067,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nEXP_GROUPS = {}\nEXP_GROUPS['starter_issam'] = hu.cartesian_exp_group({'batch_size': 32,\n 'opt': {'name': 'adamW', 'lr': 0.0001, 'wd': 1e-06}, 'model': {'name':\n 'resnext50_32x4d_ssl'}, 'loss_func': {'name': 'cross_entropy'},\n 'max_epoch': [50]})\nEXP_GROUPS['clip'] = hu.cartesian_exp_group({'batch_size': 32, 'model': {\n 'name': 'clip'}, 'max_epoch': [30]})\n",
"step-3": "from haven import haven_utils as hu\nimport itertools, copy\nEXP_GROUPS = {}\nEXP_GROUPS['starter_issam'] = hu.cartesian_exp_group({'batch_size': 32,\n 'opt': {'name': 'adamW', 'lr': 0.0001, 'wd': 1e-06}, 'model': {'name':\n 'resnext50_32x4d_ssl'}, 'loss_func': {'name': 'cross_entropy'},\n 'max_epoch': [50]})\nEXP_GROUPS['clip'] = hu.cartesian_exp_group({'batch_size': 32, 'model': {\n 'name': 'clip'}, 'max_epoch': [30]})\n",
"step-4": "from haven import haven_utils as hu\nimport itertools, copy\n\nEXP_GROUPS = {}\n\n\nEXP_GROUPS['starter_issam'] = hu.cartesian_exp_group({\n 'batch_size': 32,\n 'opt': {'name': 'adamW', 'lr': 0.0001, 'wd': 1e-6},\n 'model': {'name': 'resnext50_32x4d_ssl'},\n 'loss_func': {'name': 'cross_entropy'},\n 'max_epoch': [50]\n })\n\nEXP_GROUPS['clip'] = hu.cartesian_exp_group({\n 'batch_size': 32,\n 'model': {'name': 'clip'},\n 'max_epoch': [30],\n })",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def add_frame_id(video, output_dir):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return -1
os.makedirs(output_dir, exist_ok=True)
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
for frame_id in tqdm(range(frame_count)):
has_frame, frame = reader.read()
if not has_frame:
break
cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
reader.release()
return 0
def get_meta(video):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return None, None, None
width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
return width, height, frame_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_frame_id(video, output_dir):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return -1
os.makedirs(output_dir, exist_ok=True)
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
for frame_id in tqdm(range(frame_count)):
has_frame, frame = reader.read()
if not has_frame:
break
cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
reader.release()
return 0
def get_meta(video):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return None, None, None
width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
return width, height, frame_count
<|reserved_special_token_0|>
for fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):
_, right_frame = reader1.read()
if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:
left_frame = filler
else:
_, left_frame = reader2.read()
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
writer.write(new_frame)
for fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -
LEFT_SYNC_2)):
_, right_frame = reader1.read()
new_frame = np.concatenate([filler, border, right_frame], axis=1)
writer.write(new_frame)
for fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):
r1, right_frame = reader1.read()
if not r1:
break
r3, left_frame = reader3.read()
if not r3:
left_frame = filler
else:
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
writer.write(new_frame)
reader1.release()
reader2.release()
writer.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DIR = '/home/nghiatruong/Desktop'
INPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')
INPUT_2 = os.path.join(DIR, '20190715_180940.mp4')
INPUT_3 = os.path.join(DIR, '20190715_181200.mp4')
RIGHT_SYNC_1 = 1965
LEFT_SYNC_1 = 1700
RIGHT_SYNC_2 = 5765
LEFT_SYNC_2 = 1282
def add_frame_id(video, output_dir):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return -1
os.makedirs(output_dir, exist_ok=True)
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
for frame_id in tqdm(range(frame_count)):
has_frame, frame = reader.read()
if not has_frame:
break
cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
reader.release()
return 0
def get_meta(video):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return None, None, None
width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
return width, height, frame_count
w1, h1, fc1 = get_meta(INPUT_1)
h2, w2, fc2 = get_meta(INPUT_2)
ratio = h1 / h2
w2 = int(w2 * ratio) + 1
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (
w1 + w2 + 10, h1))
border = np.zeros((h1, 10, 3), dtype='uint8')
filler = np.zeros((h1, w2, 3), dtype='uint8')
reader1 = cv2.VideoCapture(INPUT_1)
reader2 = cv2.VideoCapture(INPUT_2)
reader3 = cv2.VideoCapture(INPUT_3)
last_shape = h1, w1 + w2 + 10, 3
for fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):
_, right_frame = reader1.read()
if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:
left_frame = filler
else:
_, left_frame = reader2.read()
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
writer.write(new_frame)
for fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -
LEFT_SYNC_2)):
_, right_frame = reader1.read()
new_frame = np.concatenate([filler, border, right_frame], axis=1)
writer.write(new_frame)
for fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):
r1, right_frame = reader1.read()
if not r1:
break
r3, left_frame = reader3.read()
if not r3:
left_frame = filler
else:
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
writer.write(new_frame)
reader1.release()
reader2.release()
writer.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
import os
from tqdm import tqdm
DIR = '/home/nghiatruong/Desktop'
INPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')
INPUT_2 = os.path.join(DIR, '20190715_180940.mp4')
INPUT_3 = os.path.join(DIR, '20190715_181200.mp4')
RIGHT_SYNC_1 = 1965
LEFT_SYNC_1 = 1700
RIGHT_SYNC_2 = 5765
LEFT_SYNC_2 = 1282
def add_frame_id(video, output_dir):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return -1
os.makedirs(output_dir, exist_ok=True)
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
for frame_id in tqdm(range(frame_count)):
has_frame, frame = reader.read()
if not has_frame:
break
cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
reader.release()
return 0
def get_meta(video):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return None, None, None
width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
return width, height, frame_count
w1, h1, fc1 = get_meta(INPUT_1)
h2, w2, fc2 = get_meta(INPUT_2)
ratio = h1 / h2
w2 = int(w2 * ratio) + 1
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (
w1 + w2 + 10, h1))
border = np.zeros((h1, 10, 3), dtype='uint8')
filler = np.zeros((h1, w2, 3), dtype='uint8')
reader1 = cv2.VideoCapture(INPUT_1)
reader2 = cv2.VideoCapture(INPUT_2)
reader3 = cv2.VideoCapture(INPUT_3)
last_shape = h1, w1 + w2 + 10, 3
for fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):
_, right_frame = reader1.read()
if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:
left_frame = filler
else:
_, left_frame = reader2.read()
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
writer.write(new_frame)
for fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -
LEFT_SYNC_2)):
_, right_frame = reader1.read()
new_frame = np.concatenate([filler, border, right_frame], axis=1)
writer.write(new_frame)
for fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):
r1, right_frame = reader1.read()
if not r1:
break
r3, left_frame = reader3.read()
if not r3:
left_frame = filler
else:
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
writer.write(new_frame)
reader1.release()
reader2.release()
writer.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
import os
from tqdm import tqdm
DIR = '/home/nghiatruong/Desktop'
INPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')
INPUT_2 = os.path.join(DIR, '20190715_180940.mp4')
INPUT_3 = os.path.join(DIR, '20190715_181200.mp4')
RIGHT_SYNC_1 = 1965
LEFT_SYNC_1 = 1700
RIGHT_SYNC_2 = 5765
LEFT_SYNC_2 = 1282
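# RIGHT_* are frame indices in the GoPro video (INPUT_1) and LEFT_* the matching
# frame indices in the phone clips (INPUT_2/INPUT_3) showing the same instant;
# their differences are used below as offsets to keep the two streams in sync.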
def add_frame_id(video, output_dir):
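    """Dump every frame of `video` into output_dir as <frame_id>.jpg; returns 0 on success, -1 if the video cannot be opened."""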
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return -1
os.makedirs(output_dir, exist_ok=True)
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
for frame_id in tqdm(range(frame_count)):
has_frame, frame = reader.read()
if not has_frame:
break
cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
reader.release()
return 0
def get_meta(video):
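    """Return (width, height, frame_count) of `video`, or (None, None, None) if it cannot be opened."""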
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return None, None, None
width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
return width, height, frame_count
w1, h1, fc1 = get_meta(INPUT_1)
h2, w2, fc2 = get_meta(INPUT_2)
ratio = h1 / h2
w2 = int(w2*ratio)+1
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (w1+w2+10, h1))
border = np.zeros((h1, 10, 3), dtype='uint8')
filler = np.zeros((h1, w2, 3), dtype='uint8')
reader1 = cv2.VideoCapture(INPUT_1)
reader2 = cv2.VideoCapture(INPUT_2)
reader3 = cv2.VideoCapture(INPUT_3)
last_shape = (h1, w1+w2+10, 3)
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1)):
_, right_frame = reader1.read()
if fid < RIGHT_SYNC_1-LEFT_SYNC_1:
left_frame = filler
else:
_, left_frame = reader2.read()
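        # transpose + horizontal flip rotates the portrait phone frame 90 degrees;
        # the resize scales it to the GoPro frame height (ratio = h1 / h2)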
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1, RIGHT_SYNC_2-LEFT_SYNC_2)):
_, right_frame = reader1.read()
new_frame = np.concatenate([filler, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
for fid in tqdm(range(RIGHT_SYNC_2-LEFT_SYNC_2, fc1)):
r1, right_frame = reader1.read()
if not r1:
break
r3, left_frame = reader3.read()
if not r3:
left_frame = filler
else:
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
reader1.release()
reader2.release()
writer.release()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "f8f538773693b9d9530775094d9948626247a3bb",
"index": 6950,
"step-1": "<mask token>\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\n<mask token>\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2 * ratio) + 1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (\n w1 + w2 + 10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\nlast_shape = h1, w1 + w2 + 10, 3\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2 * ratio) + 1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (\n w1 + w2 + 10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\nlast_shape = h1, w1 + w2 + 10, 3\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\n\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2*ratio)+1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (w1+w2+10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\n\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\n\nlast_shape = (h1, w1+w2+10, 3)\nfor fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1-LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\nfor fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1, RIGHT_SYNC_2-LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\nfor fid in tqdm(range(RIGHT_SYNC_2-LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\n\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""Get pandas dataframes for a given data and month.
*get_dataframes(csvfile, spec=SPEC)* is a function to get dataframes
 from *csvfile* connection under *spec* parsing instructions.
*Vintage* class addresses dataset by year and month:
Vintage(year, month).save()
Vintage(year, month).validate()
*Collection* manipulates all datasets, released at various dates:
Collection.save_all()
Collection.save_latest()
Collection.approve_latest()
Collection.approve_all()
"""
from config import LocalCSV, LATEST_DATE, SUPPORTED_DATES
from csv2df.specification import SPEC
from csv2df.reader import Reader, open_csv
from csv2df.parser import extract_tables
from csv2df.emitter import Emitter
from csv2df.validator import Validator
__all__ = ['get_dataframes', 'Vintage', 'Collection']
FREQUENCIES = ['a', 'q', 'm']
def get_dataframes(csvfile, spec=SPEC):
"""Extract dataframes from *csvfile* using *spec* parsing instructions.
Args:
csvfile (file connection or StringIO) - CSV file for parsing
        spec (spec.Specification) - parsing instructions, defaults to spec.SPEC
Returns:
Three pandas dataframes at annual, qtr and monthly frequencies
in a dictionary.
"""
tables = [t for csv_segment, pdef in Reader(csvfile, spec).items()
for t in extract_tables(csv_segment, pdef)]
emitter = Emitter(tables)
return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}
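# Minimal sketch of calling get_dataframes directly (assumes the interim CSV
# for 2015-05 exists locally; the Vintage class below wraps exactly this):
#
#     with open_csv(LocalCSV(2015, 5).interim) as csvfile:
#         dfa = get_dataframes(csvfile)['a']  # annual-frequency dataframe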
class Vintage:
"""Represents dataset release for a given year and month."""
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print("Saved dataframe to", path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print("Test values parsed OK for", self)
return True
def __repr__(self):
return "Vintage({}, {})".format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print("Checking", year, month)
vintage = Vintage(year, month)
vintage.validate()
if __name__ == "__main__":
# Collection calls
# Collection.approve_latest()
# Collection.approve_all()
# Collection.save_latest()
# Collection.save_all()
# sample Vintage call
year, month = 2015, 5
vint = Vintage(year, month)
vint.validate()
    # dfa, dfq, dfm = (vint.dfs[freq] for freq in FREQUENCIES)
|
normal
|
{
"blob_id": "e78c4f65d84d5b33debb415005e22f926e14d7d4",
"index": 1203,
"step-1": "<mask token>\n\n\nclass Vintage:\n <mask token>\n\n def __init__(self, year, month):\n self.year, self.month = year, month\n self.csv = LocalCSV(year, month)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Collection:\n \"\"\"Methods to manipulate entire set of data releases.\"\"\"\n all_dates = SUPPORTED_DATES\n latest_vintage = Vintage(*LATEST_DATE)\n\n @classmethod\n def save_latest(cls):\n cls.latest_vintage.save()\n\n @classmethod\n def approve_latest(cls):\n \"\"\"Quick check for algorithm on latest available data.\"\"\"\n cls.latest_vintage.validate()\n\n @classmethod\n def save_all(cls):\n for year, month in cls.all_dates:\n Vintage(year, month).save()\n\n @classmethod\n def approve_all(cls):\n \"\"\"Checks all dates, runs for about 1-2 min of a fast computer.\n May fail if dataset not complete, eg word2csv written only part\n of CSV file.\n \"\"\"\n for year, month in cls.all_dates:\n print('Checking', year, month)\n vintage = Vintage(year, month)\n vintage.validate()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vintage:\n <mask token>\n\n def __init__(self, year, month):\n self.year, self.month = year, month\n self.csv = LocalCSV(year, month)\n\n @property\n def dfs(self):\n with open_csv(self.csv.interim) as csvfile:\n return get_dataframes(csvfile)\n\n def save(self):\n for freq, df in self.dfs.items():\n path = self.csv.processed(freq)\n df.to_csv(path)\n print('Saved dataframe to', path)\n return True\n\n def validate(self):\n checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])\n checker.run()\n print('Test values parsed OK for', self)\n return True\n\n def __repr__(self):\n return 'Vintage({}, {})'.format(self.year, self.month)\n\n\nclass Collection:\n \"\"\"Methods to manipulate entire set of data releases.\"\"\"\n all_dates = SUPPORTED_DATES\n latest_vintage = Vintage(*LATEST_DATE)\n\n @classmethod\n def save_latest(cls):\n cls.latest_vintage.save()\n\n @classmethod\n def approve_latest(cls):\n \"\"\"Quick check for algorithm on latest available data.\"\"\"\n cls.latest_vintage.validate()\n\n @classmethod\n def save_all(cls):\n for year, month in cls.all_dates:\n Vintage(year, month).save()\n\n @classmethod\n def approve_all(cls):\n \"\"\"Checks all dates, runs for about 1-2 min of a fast computer.\n May fail if dataset not complete, eg word2csv written only part\n of CSV file.\n \"\"\"\n for year, month in cls.all_dates:\n print('Checking', year, month)\n vintage = Vintage(year, month)\n vintage.validate()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Vintage:\n \"\"\"Represents dataset release for a given year and month.\"\"\"\n\n def __init__(self, year, month):\n self.year, self.month = year, month\n self.csv = LocalCSV(year, month)\n\n @property\n def dfs(self):\n with open_csv(self.csv.interim) as csvfile:\n return get_dataframes(csvfile)\n\n def save(self):\n for freq, df in self.dfs.items():\n path = self.csv.processed(freq)\n df.to_csv(path)\n print('Saved dataframe to', path)\n return True\n\n def validate(self):\n checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])\n checker.run()\n print('Test values parsed OK for', self)\n return True\n\n def __repr__(self):\n return 'Vintage({}, {})'.format(self.year, self.month)\n\n\nclass Collection:\n \"\"\"Methods to manipulate entire set of data releases.\"\"\"\n all_dates = SUPPORTED_DATES\n latest_vintage = Vintage(*LATEST_DATE)\n\n @classmethod\n def save_latest(cls):\n cls.latest_vintage.save()\n\n @classmethod\n def approve_latest(cls):\n \"\"\"Quick check for algorithm on latest available data.\"\"\"\n cls.latest_vintage.validate()\n\n @classmethod\n def save_all(cls):\n for year, month in cls.all_dates:\n Vintage(year, month).save()\n\n @classmethod\n def approve_all(cls):\n \"\"\"Checks all dates, runs for about 1-2 min of a fast computer.\n May fail if dataset not complete, eg word2csv written only part\n of CSV file.\n \"\"\"\n for year, month in cls.all_dates:\n print('Checking', year, month)\n vintage = Vintage(year, month)\n vintage.validate()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_dataframes(csvfile, spec=SPEC):\n \"\"\"Extract dataframes from *csvfile* using *spec* parsing instructions.\n\n Args:\n csvfile (file connection or StringIO) - CSV file for parsing\n spec (spec.Specification) - pasing instructions, defaults to spec.SPEC\n\n Returns:\n Three pandas dataframes at annual, qtr and monthly frequencies\n in a dictionary.\n \"\"\"\n tables = [t for csv_segment, pdef in Reader(csvfile, spec).items() for\n t in extract_tables(csv_segment, pdef)]\n emitter = Emitter(tables)\n return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}\n\n\nclass Vintage:\n \"\"\"Represents dataset release for a given year and month.\"\"\"\n\n def __init__(self, year, month):\n self.year, self.month = year, month\n self.csv = LocalCSV(year, month)\n\n @property\n def dfs(self):\n with open_csv(self.csv.interim) as csvfile:\n return get_dataframes(csvfile)\n\n def save(self):\n for freq, df in self.dfs.items():\n path = self.csv.processed(freq)\n df.to_csv(path)\n print('Saved dataframe to', path)\n return True\n\n def validate(self):\n checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])\n checker.run()\n print('Test values parsed OK for', self)\n return True\n\n def __repr__(self):\n return 'Vintage({}, {})'.format(self.year, self.month)\n\n\nclass Collection:\n \"\"\"Methods to manipulate entire set of data releases.\"\"\"\n all_dates = SUPPORTED_DATES\n latest_vintage = Vintage(*LATEST_DATE)\n\n @classmethod\n def save_latest(cls):\n cls.latest_vintage.save()\n\n @classmethod\n def approve_latest(cls):\n \"\"\"Quick check for algorithm on latest available data.\"\"\"\n cls.latest_vintage.validate()\n\n @classmethod\n def save_all(cls):\n for year, month in cls.all_dates:\n Vintage(year, month).save()\n\n @classmethod\n def approve_all(cls):\n \"\"\"Checks all dates, runs for about 1-2 min of a fast computer.\n May fail if dataset not complete, eg word2csv written only part\n of CSV file.\n \"\"\"\n for year, month in cls.all_dates:\n print('Checking', year, month)\n vintage = Vintage(year, month)\n vintage.validate()\n\n\n<mask token>\n",
"step-5": "\"\"\"Get pandas dataframes for a given data and month.\n\n*get_dataframes(csvfile, spec=SPEC)* is a function to get dataframes\n from *csvfile* connection under *spec* parsing instruction.\n\n*Vintage* class addresses dataset by year and month:\n\n Vintage(year, month).save()\n Vintage(year, month).validate()\n\n*Collection* manipulates all datasets, released at various dates:\n\n Collection.save_all()\n Collection.save_latest()\n Collection.approve_latest()\n Collection.approve_all()\n\"\"\"\n\nfrom config import LocalCSV, LATEST_DATE, SUPPORTED_DATES \nfrom csv2df.specification import SPEC\nfrom csv2df.reader import Reader, open_csv\nfrom csv2df.parser import extract_tables\nfrom csv2df.emitter import Emitter\nfrom csv2df.validator import Validator\n\n\n__all__ = ['get_dataframes', 'Vintage', 'Collection']\n\nFREQUENCIES = ['a', 'q', 'm']\n\n\ndef get_dataframes(csvfile, spec=SPEC):\n \"\"\"Extract dataframes from *csvfile* using *spec* parsing instructions.\n\n Args:\n csvfile (file connection or StringIO) - CSV file for parsing\n spec (spec.Specification) - pasing instructions, defaults to spec.SPEC\n\n Returns:\n Three pandas dataframes at annual, qtr and monthly frequencies\n in a dictionary.\n \"\"\"\n tables = [t for csv_segment, pdef in Reader(csvfile, spec).items()\n for t in extract_tables(csv_segment, pdef)]\n emitter = Emitter(tables)\n return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}\n\n\nclass Vintage:\n \"\"\"Represents dataset release for a given year and month.\"\"\"\n\n def __init__(self, year, month):\n self.year, self.month = year, month\n self.csv = LocalCSV(year, month)\n\n @property \n def dfs(self): \n with open_csv(self.csv.interim) as csvfile:\n return get_dataframes(csvfile)\n\n def save(self):\n for freq, df in self.dfs.items():\n path = self.csv.processed(freq)\n df.to_csv(path)\n print(\"Saved dataframe to\", path)\n return True\n\n def validate(self):\n checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])\n checker.run()\n print(\"Test values parsed OK for\", self)\n return True\n\n def __repr__(self):\n return \"Vintage({}, {})\".format(self.year, self.month)\n\n\nclass Collection:\n \"\"\"Methods to manipulate entire set of data releases.\"\"\"\n\n all_dates = SUPPORTED_DATES \n latest_vintage = Vintage(*LATEST_DATE)\n\n @classmethod\n def save_latest(cls):\n cls.latest_vintage.save()\n\n @classmethod\n def approve_latest(cls):\n \"\"\"Quick check for algorithm on latest available data.\"\"\"\n cls.latest_vintage.validate()\n\n @classmethod\n def save_all(cls):\n for year, month in cls.all_dates:\n Vintage(year, month).save()\n\n @classmethod\n def approve_all(cls):\n \"\"\"Checks all dates, runs for about 1-2 min of a fast computer.\n May fail if dataset not complete, eg word2csv written only part\n of CSV file.\n \"\"\"\n for year, month in cls.all_dates:\n print(\"Checking\", year, month)\n vintage = Vintage(year, month)\n vintage.validate()\n\n\nif __name__ == \"__main__\":\n # Collection calls\n # Collection.approve_latest()\n # Collection.approve_all()\n # Collection.save_latest()\n # Collection.save_all()\n\n # sample Vintage call\n year, month = 2015, 5\n vint = Vintage(year, month)\n vint.validate()\n #dfa, dfq, dfm = vint.dfs()\n",
"step-ids": [
9,
13,
14,
15,
19
]
}
|
[
9,
13,
14,
15,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Welcome aboard, Oleksij!')
|
flexible
|
{
"blob_id": "2b1ec29d665aa93cd53644b62efcd1305b34e13e",
"index": 2636,
"step-1": "<mask token>\n",
"step-2": "print('Welcome aboard, Oleksij!')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fileMD(self):
salt_ = os.urandom(32).hex()
hash_object = hashlib.md5()
hash_object.update(('%s%s' % (salt_, self.theFile)).encode('utf-8'))
print('MD5 Hash: ' + hash_object.hexdigest())
<|reserved_special_token_1|>
import hashlib
import os
def fileMD(self):
salt_ = os.urandom(32).hex()
hash_object = hashlib.md5()
hash_object.update(('%s%s' % (salt_, self.theFile)).encode('utf-8'))
print('MD5 Hash: ' + hash_object.hexdigest())
<|reserved_special_token_1|>
import hashlib
import os
def fileMD(self):
salt_ = os.urandom(32).hex()
hash_object = hashlib.md5()
hash_object.update(('%s%s' % (salt_, self.theFile)).encode('utf-8'))
print("MD5 Hash: "+hash_object.hexdigest())
|
flexible
|
{
"blob_id": "bc9718fa57046888961d1b5245abefa8f752e983",
"index": 8103,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fileMD(self):\n salt_ = os.urandom(32).hex()\n hash_object = hashlib.md5()\n hash_object.update(('%s%s' % (salt_, self.theFile)).encode('utf-8'))\n print('MD5 Hash: ' + hash_object.hexdigest())\n",
"step-3": "import hashlib\nimport os\n\n\ndef fileMD(self):\n salt_ = os.urandom(32).hex()\n hash_object = hashlib.md5()\n hash_object.update(('%s%s' % (salt_, self.theFile)).encode('utf-8'))\n print('MD5 Hash: ' + hash_object.hexdigest())\n",
"step-4": "import hashlib\nimport os\n\n\ndef fileMD(self):\n salt_ = os.urandom(32).hex()\n hash_object = hashlib.md5()\n hash_object.update(('%s%s' % (salt_, self.theFile)).encode('utf-8'))\n print(\"MD5 Hash: \"+hash_object.hexdigest())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ClassRoom:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def Name2Id(room_id, name):
bool_n = bool(re.match('教\\d{1}-\\d{3}', name))
bool_id = bool(re.match('B\\d{1}R\\d{3}', room_id))
if not (bool_id or bool_n):
return False
elif bool_n:
room_id = 'B' + name[1] + 'R' + name[3:6]
else:
name = '教' + room_id[1] + '-' + room_id[3:6]
return room_id, name
def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):
if not ClassRoom.Name2Id(room_id, name):
self.WrongFlag = 1
else:
self.id, self.name = ClassRoom.Name2Id(room_id, name)
self.seats = seats
self.key_id = key_id
self.event = event
ClassRoom.PullClassroom(self)
def PullClassroom(self):
result = self.__mycol.find_one({'_id': self.id})
if result:
self.name = self.name or result['name']
self.seats = self.seats or result['seats']
self.key_id = self.key_id or result['key_id']
self.event = self.event or result['event']
return self
else:
return False
def TurnDict(self):
mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,
'key_id': self.key_id, 'event': self.event}
return mydict
def PushClassroom(self):
mydict = self.TurnDict()
if self.__mycol.find_one({'_id': self.id}):
myquery = {'_id': self.id}
self.__mycol.update(myquery, mydict)
return 'Acc_Updated'
else:
self.__mycol.insert_one(mydict)
return 'Acc_Created'
def AllClassroom(self):
cursor = self.__mycol.find()
if cursor:
return cursor
else:
return False
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClassRoom:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def Name2Id(room_id, name):
bool_n = bool(re.match('教\\d{1}-\\d{3}', name))
bool_id = bool(re.match('B\\d{1}R\\d{3}', room_id))
if not (bool_id or bool_n):
return False
elif bool_n:
room_id = 'B' + name[1] + 'R' + name[3:6]
else:
name = '教' + room_id[1] + '-' + room_id[3:6]
return room_id, name
def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):
if not ClassRoom.Name2Id(room_id, name):
self.WrongFlag = 1
else:
self.id, self.name = ClassRoom.Name2Id(room_id, name)
self.seats = seats
self.key_id = key_id
self.event = event
ClassRoom.PullClassroom(self)
def PullClassroom(self):
result = self.__mycol.find_one({'_id': self.id})
if result:
self.name = self.name or result['name']
self.seats = self.seats or result['seats']
self.key_id = self.key_id or result['key_id']
self.event = self.event or result['event']
return self
else:
return False
def TurnDict(self):
mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,
'key_id': self.key_id, 'event': self.event}
return mydict
def PushClassroom(self):
mydict = self.TurnDict()
if self.__mycol.find_one({'_id': self.id}):
myquery = {'_id': self.id}
self.__mycol.update(myquery, mydict)
return 'Acc_Updated'
else:
self.__mycol.insert_one(mydict)
return 'Acc_Created'
def AllClassroom(self):
cursor = self.__mycol.find()
if cursor:
return cursor
else:
return False
def Delete(self):
        self.__mycol.delete_one({'_id': self.id})
return 'Deleted'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClassRoom:
__myclient = pymongo.MongoClient('mongodb://localhost:27017')
__mydb = __myclient['MMKeyDB']
__mycol = __mydb['ClassRoom']
def Name2Id(room_id, name):
bool_n = bool(re.match('教\\d{1}-\\d{3}', name))
bool_id = bool(re.match('B\\d{1}R\\d{3}', room_id))
if not (bool_id or bool_n):
return False
elif bool_n:
room_id = 'B' + name[1] + 'R' + name[3:6]
else:
name = '教' + room_id[1] + '-' + room_id[3:6]
return room_id, name
def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):
if not ClassRoom.Name2Id(room_id, name):
self.WrongFlag = 1
else:
self.id, self.name = ClassRoom.Name2Id(room_id, name)
self.seats = seats
self.key_id = key_id
self.event = event
ClassRoom.PullClassroom(self)
def PullClassroom(self):
result = self.__mycol.find_one({'_id': self.id})
if result:
self.name = self.name or result['name']
self.seats = self.seats or result['seats']
self.key_id = self.key_id or result['key_id']
self.event = self.event or result['event']
return self
else:
return False
def TurnDict(self):
mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,
'key_id': self.key_id, 'event': self.event}
return mydict
def PushClassroom(self):
mydict = self.TurnDict()
if self.__mycol.find_one({'_id': self.id}):
myquery = {'_id': self.id}
self.__mycol.update(myquery, mydict)
return 'Acc_Updated'
else:
self.__mycol.insert_one(mydict)
return 'Acc_Created'
def AllClassroom(self):
cursor = self.__mycol.find()
if cursor:
return cursor
else:
return False
def Delete(self):
        self.__mycol.delete_one({'_id': self.id})
return 'Deleted'
if __name__ == '__main__':
index = ClassRoom().AllClassroom()
for i in index:
print(i)
<|reserved_special_token_1|>
import pymongo
import os, sys
import re
from db_User import *
from db_Event import *
class ClassRoom:
__myclient = pymongo.MongoClient('mongodb://localhost:27017')
__mydb = __myclient['MMKeyDB']
__mycol = __mydb['ClassRoom']
def Name2Id(room_id, name):
bool_n = bool(re.match('教\\d{1}-\\d{3}', name))
bool_id = bool(re.match('B\\d{1}R\\d{3}', room_id))
if not (bool_id or bool_n):
return False
elif bool_n:
room_id = 'B' + name[1] + 'R' + name[3:6]
else:
name = '教' + room_id[1] + '-' + room_id[3:6]
return room_id, name
def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):
if not ClassRoom.Name2Id(room_id, name):
self.WrongFlag = 1
else:
self.id, self.name = ClassRoom.Name2Id(room_id, name)
self.seats = seats
self.key_id = key_id
self.event = event
ClassRoom.PullClassroom(self)
def PullClassroom(self):
result = self.__mycol.find_one({'_id': self.id})
if result:
self.name = self.name or result['name']
self.seats = self.seats or result['seats']
self.key_id = self.key_id or result['key_id']
self.event = self.event or result['event']
return self
else:
return False
def TurnDict(self):
mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,
'key_id': self.key_id, 'event': self.event}
return mydict
def PushClassroom(self):
mydict = self.TurnDict()
if self.__mycol.find_one({'_id': self.id}):
myquery = {'_id': self.id}
self.__mycol.update(myquery, mydict)
return 'Acc_Updated'
else:
self.__mycol.insert_one(mydict)
return 'Acc_Created'
def AllClassroom(self):
cursor = self.__mycol.find()
if cursor:
return cursor
else:
return False
def Delete(self):
        self.__mycol.delete_one({'_id': self.id})
return 'Deleted'
if __name__ == '__main__':
index = ClassRoom().AllClassroom()
for i in index:
print(i)
<|reserved_special_token_1|>
import pymongo
import os,sys
import re
from db_User import *
from db_Event import *
class ClassRoom:
    # Connect to the local MongoDB client
__myclient = pymongo.MongoClient("mongodb://localhost:27017")
    # Create the database
__mydb = __myclient["MMKeyDB"]
    # Create a new collection
__mycol = __mydb["ClassRoom"]
    # Check whether an id or a name was supplied; if so, derive the other form
def Name2Id(room_id,name):
bool_n = bool(re.match("教\d{1}-\d{3}",name))
bool_id = bool(re.match("B\d{1}R\d{3}",room_id))
if not (bool_id or bool_n):
return False
elif bool_n:
room_id = "B" + name[1] + "R" + name[3:6]
else:
name = "教" + room_id[1] + "-" + room_id[3:6]
return room_id,name
def __init__(self,
room_id = "",
name = "",
seats = 0,
key_id = "",
event = []):
if not(ClassRoom.Name2Id(room_id,name)):
self.WrongFlag = 1
else:
self.id,self.name = ClassRoom.Name2Id(room_id,name)
self.seats = seats
self.key_id = key_id
self.event = event
ClassRoom.PullClassroom(self)
def PullClassroom(self):
result = self.__mycol.find_one({ "_id": self.id })
if result:
self.name = self.name or result['name']
self.seats = self.seats or result['seats']
self.key_id= self.key_id or result['key_id']
self.event = self.event or result['event']
return self
else:
return False
def TurnDict(self):
mydict = {
"_id" : self.id ,
"name" : self.name,
"seats" : self.seats,
"key_id" : self.key_id,
"event" : self.event}
return mydict
def PushClassroom(self):
mydict = self.TurnDict()
if self.__mycol.find_one({ "_id": self.id }):
myquery = {"_id" : self.id}
self.__mycol.update(myquery,mydict)
return "Acc_Updated"
else:
            self.__mycol.insert_one(mydict) # upload the new document
return "Acc_Created"
def AllClassroom(self):
cursor = self.__mycol.find()
# __import__('ipdb').set_trace()
if cursor:
# index = []
# for doc in cursor:
# print(doc)
# temp = [doc['_id'],doc['name'],doc['seats'],doc['event']]
# index.append(temp)
return cursor
else:
return False
    # Delete the classroom record
def Delete(self):
User.mycol.delete_one({"_id": self.id})
return "Deleted"
if __name__ == '__main__':
index = ClassRoom().AllClassroom()
for i in index:
print(i)
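    # Hypothetical write path (requires a local mongod on 27017):
    # room = ClassRoom(name='教1-101', seats=60)
    # print(room.PushClassroom())  # 'Acc_Created' first, 'Acc_Updated' after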
|
flexible
|
{
"blob_id": "8dae8a89d08bc522f9a5fdde8aeb9e322fafcbec",
"index": 3251,
"step-1": "<mask token>\n\n\nclass ClassRoom:\n <mask token>\n <mask token>\n <mask token>\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClassRoom:\n <mask token>\n <mask token>\n <mask token>\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n\n def Delete(self):\n User.mycol.delete_one({'_id': self.id})\n return 'Deleted'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ClassRoom:\n __myclient = pymongo.MongoClient('mongodb://localhost:27017')\n __mydb = __myclient['MMKeyDB']\n __mycol = __mydb['ClassRoom']\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n\n def Delete(self):\n User.mycol.delete_one({'_id': self.id})\n return 'Deleted'\n\n\nif __name__ == '__main__':\n index = ClassRoom().AllClassroom()\n for i in index:\n print(i)\n",
"step-4": "import pymongo\nimport os, sys\nimport re\nfrom db_User import *\nfrom db_Event import *\n\n\nclass ClassRoom:\n __myclient = pymongo.MongoClient('mongodb://localhost:27017')\n __mydb = __myclient['MMKeyDB']\n __mycol = __mydb['ClassRoom']\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n\n def Delete(self):\n User.mycol.delete_one({'_id': self.id})\n return 'Deleted'\n\n\nif __name__ == '__main__':\n index = ClassRoom().AllClassroom()\n for i in index:\n print(i)\n",
"step-5": "import pymongo\nimport os,sys\nimport re\n\n\nfrom db_User import *\nfrom db_Event import *\n\n\nclass ClassRoom:\n # 链接本地客户端\n __myclient = pymongo.MongoClient(\"mongodb://localhost:27017\")\n # 创建数据库\n __mydb = __myclient[\"MMKeyDB\"]\n # 创建新的集合\n __mycol = __mydb[\"ClassRoom\"]\n\n # 判断是否输入id或是输入name,如果有输入则转译\n def Name2Id(room_id,name):\n bool_n = bool(re.match(\"教\\d{1}-\\d{3}\",name))\n bool_id = bool(re.match(\"B\\d{1}R\\d{3}\",room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = \"B\" + name[1] + \"R\" + name[3:6]\n else:\n name = \"教\" + room_id[1] + \"-\" + room_id[3:6]\n\n return room_id,name\n\n def __init__(self,\n room_id = \"\",\n name = \"\",\n seats = 0,\n key_id = \"\",\n event = []):\n\n if not(ClassRoom.Name2Id(room_id,name)):\n self.WrongFlag = 1\n else:\n self.id,self.name = ClassRoom.Name2Id(room_id,name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({ \"_id\": self.id })\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id= self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {\n \"_id\" : self.id ,\n \"name\" : self.name,\n \"seats\" : self.seats,\n \"key_id\" : self.key_id,\n \"event\" : self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({ \"_id\": self.id }):\n myquery = {\"_id\" : self.id}\n self.__mycol.update(myquery,mydict)\n return \"Acc_Updated\"\n else:\n self.__mycol.insert_one(mydict) # 上传新的document\n return \"Acc_Created\"\n \n def AllClassroom(self):\n cursor = self.__mycol.find()\n # __import__('ipdb').set_trace()\n if cursor:\n # index = []\n # for doc in cursor:\n # print(doc)\n # temp = [doc['_id'],doc['name'],doc['seats'],doc['event']]\n # index.append(temp)\n return cursor\n else:\n return False\n\n # 删除教室记录\n def Delete(self):\n User.mycol.delete_one({\"_id\": self.id})\n return \"Deleted\"\n\nif __name__ == '__main__':\n index = ClassRoom().AllClassroom()\n for i in index:\n print(i)\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
import shelve
def quantity_posts():
    try:
        data = shelve.open('data')
    except Exception as exc:
        print(exc)
    else:
        try:
            for key, value in sorted(data.items()):
                print(key, ': \t', value, '\n')
        finally:
            data.close()
if __name__ == "__main__":
print('begin')
quantity_posts()
print('end')
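# Sketch of seeding the shelf this script reads (key/value choices here are
# assumptions, not part of the original code):
#
#     with shelve.open('data') as db:
#         db['2024-01'] = 42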
|
normal
|
{
"blob_id": "41c44b32ce3329cbba5b9b336c4266bb20de31f0",
"index": 5151,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef quantity_posts():\n try:\n data = shelve.open('data')\n except Exception:\n print(Exception)\n else:\n for key, value in sorted(data.items()):\n print(key, ': \\t', value, '\\n')\n finally:\n data.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef quantity_posts():\n try:\n data = shelve.open('data')\n except Exception:\n print(Exception)\n else:\n for key, value in sorted(data.items()):\n print(key, ': \\t', value, '\\n')\n finally:\n data.close()\n\n\nif __name__ == '__main__':\n print('begin')\n quantity_posts()\n print('end')\n",
"step-4": "import shelve\n\n\ndef quantity_posts():\n try:\n data = shelve.open('data')\n except Exception:\n print(Exception)\n else:\n for key, value in sorted(data.items()):\n print(key, ': \\t', value, '\\n')\n finally:\n data.close()\n\n\nif __name__ == '__main__':\n print('begin')\n quantity_posts()\n print('end')\n",
"step-5": "import shelve\r\n\r\ndef quantity_posts():\r\n try:\r\n data = shelve.open('data')\r\n except Exception:\r\n print(Exception)\r\n else:\r\n for key, value in sorted(data.items()):\r\n print(key, ': \\t', value, '\\n')\r\n finally:\r\n data.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print('begin')\r\n quantity_posts()\r\n print('end')\r\n \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os,sys,glob
sys.path.append("../../../../libs/VASNet/")
from VASNet_frame_scoring_lib import *
sys.path.append("../../../config")
from config import *
if __name__ == '__main__':
#************************************************************************
# Purpose: frame scoring (Summarizing Videos with Attention)
# Inputs:
    #   - path_pretrained_model: path to the pretrained model
    #   - path_feature: path to the extracted video features
    #     ('.npy' files with shape (x, 1024), GoogLeNet)
    # Output: per-frame importance scores
# Author: Trivl
#************************************************************************
path_pretrained_model = cfg.PATH_DRDSN_PRETRAINED_MODEL
path_feature = cfg.PATH_FEATURE_GOOGLENET
from os import walk
f = []
for (dirpath, dirnames, filenames) in walk(path_feature):
f.extend(filenames)
break
for i in f:
features = np.load(os.path.join(path_feature,i))
score = get_VASNet_score(features,path_pretrained_model=path_pretrained_model)
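        # `score` holds the model's per-frame importance values; printing it
        # here is just for inspection (hypothetical):
        # print(i, score)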
sys.exit(0)
|
normal
|
{
"blob_id": "ce97da4aab2b9de40267730168690475c899526d",
"index": 3924,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../../../../libs/VASNet/')\n<mask token>\nsys.path.append('../../../config')\n<mask token>\nif __name__ == '__main__':\n path_pretrained_model = cfg.PATH_DRDSN_PRETRAINED_MODEL\n path_feature = cfg.PATH_FEATURE_GOOGLENET\n from os import walk\n f = []\n for dirpath, dirnames, filenames in walk(path_feature):\n f.extend(filenames)\n break\n for i in f:\n features = np.load(os.path.join(path_feature, i))\n score = get_VASNet_score(features, path_pretrained_model=\n path_pretrained_model)\n sys.exit(0)\n",
"step-3": "import os, sys, glob\nsys.path.append('../../../../libs/VASNet/')\nfrom VASNet_frame_scoring_lib import *\nsys.path.append('../../../config')\nfrom config import *\nif __name__ == '__main__':\n path_pretrained_model = cfg.PATH_DRDSN_PRETRAINED_MODEL\n path_feature = cfg.PATH_FEATURE_GOOGLENET\n from os import walk\n f = []\n for dirpath, dirnames, filenames in walk(path_feature):\n f.extend(filenames)\n break\n for i in f:\n features = np.load(os.path.join(path_feature, i))\n score = get_VASNet_score(features, path_pretrained_model=\n path_pretrained_model)\n sys.exit(0)\n",
"step-4": "import os,sys,glob\nsys.path.append(\"../../../../libs/VASNet/\")\nfrom VASNet_frame_scoring_lib import *\nsys.path.append(\"../../../config\")\nfrom config import *\n\n\nif __name__ == '__main__':\n #************************************************************************\n # Purpose: frame scoring (Summarizing Videos with Attention)\n # Inputs:\n # - path_pretrained_model: path pretrained model\n # - path_feature: path feature extraction of video(' .npy' with shape: x,1024 (GoogLeNet)) \n # Output: Score\n # Author: Trivl\n #************************************************************************\n\n path_pretrained_model = cfg.PATH_DRDSN_PRETRAINED_MODEL\n path_feature = cfg.PATH_FEATURE_GOOGLENET\n from os import walk\n f = []\n for (dirpath, dirnames, filenames) in walk(path_feature):\n f.extend(filenames)\n break\n for i in f:\n features = np.load(os.path.join(path_feature,i))\n score = get_VASNet_score(features,path_pretrained_model=path_pretrained_model)\n sys.exit(0)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#from graph import *
#ex = open('ex_K.py', 'r')
#ex.read()
import ex_K
ex = ex_K
print "digraph K {"
print (str(ex.K))
print "}"
|
normal
|
{
"blob_id": "44dbb7587530fac9e538dfe31c7df15b1a016251",
"index": 7091,
"step-1": "#from graph import *\r\n#ex = open('ex_K.py', 'r')\r\n#ex.read()\r\nimport ex_K\r\nex = ex_K\r\n\r\nprint \"digraph K {\"\r\nprint (str(ex.K))\r\nprint \"}\"\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
#
# RPi.Spark KeyButton Demo
#
# Author: Kunpeng Zhang
# 2018.6.6
#
# See LICENSE for details.
from time import sleep
import RPi.GPIO as GPIO
from JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL
########################################################################
# Key buttons include Joystick buttons and Action buttons,
# use BCM mode, there are keyboard layout:
#
# [JOY UP]
# [JOY LEFT] [JOY RIGHT] [ACT_A] [ACT_B]
# [JOY DOWN]
#
class CONFIG_KEY:
# Action Buttons BCM_IO_NUM
BUTTON_ACT_A = 22
BUTTON_ACT_B = 23
# Joy Buttons BCM_IO_NUM
BUTTON_JOY_LEFT = 26
BUTTON_JOY_RIGHT = 27
BUTTON_JOY_UP = 5
BUTTON_JOY_DOWN = 6
BUTTON_JOY_OK = 24
class demo:
_myKey = None
def __init__(self):
self._myKey = RPiKeyButtons()
def _getKeyButtonName(self, keyBtn):
if keyBtn == CONFIG_KEY.BUTTON_ACT_A: return "BUTTON_A"
if keyBtn == CONFIG_KEY.BUTTON_ACT_B: return "BUTTON_B"
if keyBtn == CONFIG_KEY.BUTTON_JOY_UP: return "JOY_UP"
if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN: return "JOY_DOWN"
if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT: return "JOY_RIGHT"
if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT: return "JOY_LEFT"
if keyBtn == CONFIG_KEY.BUTTON_JOY_OK: return "JOY_CENTER"
return "UNKNOW"
def onKeyButtonDown(self, channel):
print("DOWN:\t{}".format(self._getKeyButtonName(channel)))
pass
def onKeyButtonUp(self, channel):
print("UP:\t{}\n".format(self._getKeyButtonName(channel)))
pass
def _callbackKeyButton(self, channel):
"""!
        Key button interrupt event callback function.
        Override this method to implement the behavior you want.
"""
if self._myKey.readKeyButton(channel) == 0:
self.onKeyButtonDown(channel)
return
if self._myKey.readKeyButton(channel) == 1:
self.onKeyButtonUp(channel)
return
def initKeyButtons(self, mode = "INT"):
"""!
        Init all key-button interrupt events or query mode.
        Override onKeyButtonDown and onKeyButtonUp to implement the behavior you want.
@param mode: Can be { "INT" | "QUERY" }, default is "INT"
"""
if mode.upper() == "INT":
try:
self._myKey.configKeyButtons(
enableButtons = [
{"id":CONFIG_KEY.BUTTON_ACT_A, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_ACT_B, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":self._callbackKeyButton}
],
bounceTime = DEF_BOUNCE_TIME_SHORT_MON )
except:
pass
if mode.upper() == "QUERY":
self._myKey.configKeyButtons([
{"id":CONFIG_KEY.BUTTON_ACT_A, "callback":None},
{"id":CONFIG_KEY.BUTTON_ACT_B, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":None}
])
def releaseKeyButtons(self):
"""!
Release all key button events
"""
self._myKey.removeKeyButtonEvent([
CONFIG_KEY.BUTTON_ACT_A,
CONFIG_KEY.BUTTON_ACT_B,
CONFIG_KEY.BUTTON_JOY_UP,
CONFIG_KEY.BUTTON_JOY_DOWN,
CONFIG_KEY.BUTTON_JOY_LEFT,
CONFIG_KEY.BUTTON_JOY_RIGHT,
CONFIG_KEY.BUTTON_JOY_OK
])
def readKeyButton(self, keyBtn):
"""!
Read key button status, return 0 / 1
"""
if self._myKey.readKeyButton( keyBtn ) == 0:
sleep(0.02)
return 0 if self._myKey.readKeyButton( keyBtn ) else 1
return 0
def readExitButtonStatus(self):
"""!
        Read Exit action (button A and JOY UP pressed at the same time)
"""
pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
return pressA and pressUp
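    # Hedged sketch of the unused "QUERY" mode (polling instead of GPIO
    # interrupts); not exercised by this demo:
    #
    #     self.initKeyButtons("QUERY")
    #     while not self.readExitButtonStatus():
    #         if self.readKeyButton(CONFIG_KEY.BUTTON_ACT_B):
    #             print("BUTTON_B is down (polled)")
    #         sleep(0.05)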
def run(self):
print("\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n")
self.initKeyButtons("INT")
while True:
if self.readExitButtonStatus(): break
pass
self.releaseKeyButtons()
GPIO.cleanup()
if __name__ == "__main__":
demo().run()
print("Key buttons demo is end.")
|
normal
|
{
"blob_id": "50c274e0365f2556a46eb58edcd1f0a7301e89db",
"index": 8716,
"step-1": "<mask token>\n\n\nclass demo:\n <mask token>\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n 
GPIO.cleanup()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CONFIG_KEY:\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n 
< JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n demo().run()\n print('Key buttons demo is end.')\n",
"step-4": "from time import sleep\nimport RPi.GPIO as GPIO\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL\n\n\nclass CONFIG_KEY:\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy 
UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n demo().run()\n print('Key buttons demo is end.')\n",
"step-5": "# -*- coding: utf-8 -*-\n#\n# RPi.Spark KeyButton Demo\n#\n# Author: Kunpeng Zhang\n# 2018.6.6\n#\n# See LICENSE for details.\n\nfrom time import sleep\nimport RPi.GPIO as GPIO\n\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL\n\n########################################################################\n# Key buttons include Joystick buttons and Action buttons, \n# use BCM mode, there are keyboard layout:\n# \n# [JOY UP] \n# [JOY LEFT] [JOY RIGHT] [ACT_A] [ACT_B]\n# [JOY DOWN] \n#\nclass CONFIG_KEY:\n # Action Buttons BCM_IO_NUM\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n \n # Joy Buttons BCM_IO_NUM\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A: return \"BUTTON_A\"\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B: return \"BUTTON_B\"\n \n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP: return \"JOY_UP\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN: return \"JOY_DOWN\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT: return \"JOY_RIGHT\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT: return \"JOY_LEFT\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK: return \"JOY_CENTER\"\n return \"UNKNOW\"\n\n def onKeyButtonDown(self, channel):\n print(\"DOWN:\\t{}\".format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print(\"UP:\\t{}\\n\".format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode = \"INT\"):\n \"\"\"!\n Init all key buttons interrupt events or query mode. 
\n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == \"INT\":\n try:\n self._myKey.configKeyButtons(\n enableButtons = [\n {\"id\":CONFIG_KEY.BUTTON_ACT_A, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_ACT_B, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_UP, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_DOWN, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_LEFT, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_RIGHT, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_OK, \"callback\":self._callbackKeyButton}\n ],\n bounceTime = DEF_BOUNCE_TIME_SHORT_MON )\n except:\n pass\n\n if mode.upper() == \"QUERY\":\n self._myKey.configKeyButtons([\n {\"id\":CONFIG_KEY.BUTTON_ACT_A, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_ACT_B, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_OK, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_UP, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_DOWN, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_LEFT, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_RIGHT, \"callback\":None}\n ])\n \n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([\n CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B,\n CONFIG_KEY.BUTTON_JOY_UP,\n CONFIG_KEY.BUTTON_JOY_DOWN,\n CONFIG_KEY.BUTTON_JOY_LEFT,\n CONFIG_KEY.BUTTON_JOY_RIGHT,\n CONFIG_KEY.BUTTON_JOY_OK\n ])\n \n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton( keyBtn ) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton( keyBtn ) else 1\n return 0\n \n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\"\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n\")\n self.initKeyButtons(\"INT\")\n\n while True:\n if self.readExitButtonStatus(): break\n pass\n\n self.releaseKeyButtons()\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n demo().run()\n print(\"Key buttons demo is end.\")",
"step-ids": [
6,
12,
15,
16,
17
]
}
|
[
6,
12,
15,
16,
17
] |
import tensorflow as tf
import numpy as np
def safe_nanmax(x):
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore',
r'All-NaN (slice|axis) encountered')
return np.nanmax(x)
def safe_nanargmax(x):
try:
return np.nanargmax(x)
except ValueError:
return np.nan
def upper_triangular_flat(A):
ones = tf.ones_like(A)
mask_a = tf.matrix_band_part(ones, 0, -1)
mask_b = tf.matrix_band_part(ones, 0, 0)
mask = tf.cast(mask_a - mask_b, dtype=tf.bool)
return tf.boolean_mask(A, mask)
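# Note on the masking above: matrix_band_part(ones, 0, -1) keeps the upper
# triangle including the diagonal, and matrix_band_part(ones, 0, 0) keeps
# just the diagonal, so their difference is a strictly-upper-triangular
# boolean mask -- each unordered (i, j) pair is extracted exactly once.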
def pairwise_distances(embeddings, squared=False):
"""Compute the 2D matrix of distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
dot_product = tf.matmul(embeddings, tf.transpose(embeddings))
square_norm = tf.diag_part(dot_product)
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = tf.expand_dims(square_norm, 1) - 2.0 * \
dot_product + tf.expand_dims(square_norm, 0)
distances = tf.maximum(distances, 0.0)
if not squared:
mask = tf.to_float(tf.equal(distances, 0.0))
distances = distances + mask * 1e-16
distances = tf.sqrt(distances)
distances = distances * (1.0 - mask)
return distances
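# Quick sanity check (illustrative values, not from the original source):
# for embeddings [[0., 0.], [3., 4.]] the identity gives 0 + 25 - 2*0 = 25
# off-diagonal, so the returned euclidean matrix is [[0., 5.], [5., 0.]];
# the mask/1e-16 step only protects the gradient of sqrt at exactly zero.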
def contrastive_score(labels, dist, thresholds, metric="accuracy"):
d = {}
if isinstance(metric, list):
for m in metric:
d[m] = True
else:
d[metric] = True
res = {}
if "total" in d:
res["total"] = tf.size(labels)
if "f1" in d:
precision = contrastive_score(
labels, dist, thresholds, metric="precision")
recall = contrastive_score(labels, dist, thresholds, metric="recall")
res["f1"] = 2 * precision * recall / (precision + recall)
if "bacc" in d:
specificity = contrastive_score(
labels, dist, thresholds, metric="specificity")
recall = contrastive_score(labels, dist, thresholds, metric="recall")
res["metric"] = (specificity + recall) / 2
th = tf.reshape(thresholds, [1, -1])
dist = tf.reshape(dist, [-1, 1])
labels = tf.cast(tf.reshape(labels, [-1, 1]), tf.int32)
pred = tf.cast(dist < th, tf.int32)
tp = pred * labels
tn = (1 - pred) * (1 - labels)
corr = tp + tn
tp = tf.reduce_sum(tf.cast(tp, tf.float32), axis=0)
tn = tf.reduce_sum(tf.cast(tn, tf.float32), axis=0)
pred = tf.cast(pred, tf.float32)
corr = tf.cast(corr, tf.float32)
labels = tf.cast(labels, tf.float32)
if "accuracy" in d:
res["accuracy"] = tf.reduce_mean(corr, axis=0)
if "precision" in d:
res["precision"] = tp / tf.reduce_sum(pred, axis=0)
if "recall" in d:
res["recall"] = tp / tf.reduce_sum(labels)
if "specificity" in d:
res["specificity"] = tn / tf.reduce_sum(1 - labels)
if "tp" in d:
res["tp"] = tp
if "tn" in d:
res["tn"] = tn
if "pcp" in d:
res["pcp"] = tf.reduce_sum(pred, axis=0)
if "pcn" in d:
res["pcn"] = tf.reduce_sum(1 - pred, axis=0)
if "cp" in d:
res["cp"] = tf.reduce_sum(labels)
if "cn" in d:
res["cn"] = tf.reduce_sum(1 - labels)
if len(d) != len(res):
raise NotImplementedError("some metrics were not implemented")
if not isinstance(metric, list):
return next(iter(res.values()))
return res
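# Shape contract (summary, assuming 1-D labels and distances): dist becomes
# [N, 1] and thresholds [1, T], so `dist < th` broadcasts to an [N, T]
# prediction matrix and every metric comes back as a length-T vector --
# one score per candidate threshold in a single pass, e.g.
#   accs = contrastive_score(y, d, tf.constant([0.5, 1.0, 2.0]))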
def triplet_score(labels, embeddings, thresholds, metric="accuracy"):
dist = pairwise_distances(embeddings)
labels = tf.reshape(labels, [-1, 1])
pair_labels = tf.cast(tf.equal(labels, tf.transpose(labels)), tf.int32)
flat_labels = upper_triangular_flat(pair_labels)
flat_dist = upper_triangular_flat(dist)
return contrastive_score(flat_labels, flat_dist, thresholds, metric=metric)
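# triplet_score turns class labels into pair labels (equal label -> positive
# pair) and scores only the strict upper triangle of the distance matrix,
# which skips self-pairs and counts each (i, j) pair once.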
class BatchScorer:
def __init__(self):
self._tp = 0
self._tn = 0
self._pcp = 0
self._pcn = 0
self._cp = 0
self._cn = 0
self._total = 0
def score(self, y_true, y_pred, metric):
raise NotImplementedError()
def handle(self, y_true, y_pred):
d = self.score(y_true, y_pred,
["tp", "tn", "pcp", "pcn", "cp", "cn", "total"])
self._tp += d["tp"]
self._tn += d["tn"]
self._pcp += d["pcp"]
self._pcn += d["pcn"]
self._cp += d["cp"]
self._cn += d["cn"]
self._total += d["total"]
def result(self, metric):
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
if metric == "accuracy":
return (self._tp + self._tn) / self._total
if metric == "precision":
return self._tp / self._pcp
if metric == "recall":
return self._tp / self._cp
if metric == "specificity":
return self._tn / self._cn
if metric == "f1":
precision = self.result("precision")
recall = self.result("recall")
return 2 * precision * recall / (precision + recall)
if metric == "bacc":
recall = self.result("recall")
specificity = self.result("specificity")
return (recall + specificity) / 2
raise NotImplementedError()
class ContrastiveBatchScorer(BatchScorer):
def __init__(self, margin, *args, **kwargs):
self._margin = margin
self._sess = tf.Session()
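        # NOTE: this Session appears unused -- score() below builds a fresh
        # graph and Session on every call, so it could likely be dropped.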
super().__init__(*args, **kwargs)
def score(self, y_true, y_pred, metric):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
with graph.as_default():
return sess.run(
contrastive_score(
tf.convert_to_tensor(y_true, tf.float32),
tf.convert_to_tensor(y_pred, tf.float32),
tf.convert_to_tensor(self._margin, tf.float32),
metric=metric))
class TripletBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
with graph.as_default():
return sess.run(
triplet_score(
tf.convert_to_tensor(y_true, tf.float32),
tf.convert_to_tensor(y_pred, tf.float32),
tf.convert_to_tensor(self._margin, tf.float32),
metric=metric))
class FlatPairBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
assert y_pred.shape[0] == y_true.shape[0] * 2
a, b = np.split(y_pred, 2)
dist = np.linalg.norm(a - b, axis=1)
return super().score(y_true, dist, metric)
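# Usage note (layout inferred from the assert above): y_pred stacks the two
# pair members along axis 0 as [a_1..a_N, b_1..b_N]; each pair is reduced to
# a euclidean distance before the contrastive scoring applies.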
class ContrastiveOnKerasMetric:
def __init__(self, margin, metric="accuracy"):
self.__name__ = "contrastive_{}".format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return contrastive_score(
labels,
embeddings,
tf.convert_to_tensor(self._margin),
metric=self._metric)
class TripletOnKerasMetric:
def __init__(self, margin, metric="accuracy"):
self.__name__ = "triplet_{}".format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return triplet_score(
labels,
embeddings,
tf.convert_to_tensor(self._margin),
metric=self._metric)
class OfflineMetric:
def __init__(self, *args, **kwargs):
self.__name__ = self.name()
def name(self):
raise NotImplementedError()
def handle_batch(self, model, x, labels, pred):
raise NotImplementedError()
def result(self):
raise NotImplementedError()
def reset(self):
pass
class SimilarityValidationMetric(OfflineMetric):
def __init__(self,
margin,
*args,
id="sim",
metric=["accuracy"],
argmax=None,
**kwargs):
self._margin = np.array(margin)
assert argmax is None or (self._margin.ndim == 1 and argmax in metric)
self._metric = metric if isinstance(metric, list) else [metric]
self._argmax = argmax
self._scorer = None
self._id = id
        super().__init__(*args, **kwargs)
def name(self):
metrics = list(
map(lambda x: "val_{}_{}".format(self._id, x), self._metric))
if self._argmax is not None:
metrics.append("val_{}_argmax_{}".format(self._id, self._argmax))
return tuple(metrics)
def handle_batch(self, model, x, labels, pred):
self._scorer.handle(labels, pred)
def result(self):
if self._argmax is None:
metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),
self._metric)
return tuple(metrics)
else:
argmax = safe_nanargmax(self._scorer.result(self._argmax))
metrics = map(lambda x: self._scorer.result(x)[argmax],
self._metric)
return tuple(metrics) + (self._margin[argmax], )
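# result() semantics: with argmax=None each metric is reported at its own
# best threshold (an optimistic per-metric bound); with argmax set, the one
# threshold maximizing that metric is picked and every metric is reported
# at it, with the chosen margin value appended to the tuple.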
class TripletValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id="triplet", **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = TripletBatchScorer(self._margin)
class ContrastiveValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id="contrastive", **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = ContrastiveBatchScorer(self._margin)
class FlatPairValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id="fpair", **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = FlatPairBatchScorer(self._margin)
|
normal
|
{
"blob_id": "16bf4583b872f038edccbac4e567c1854d65e216",
"index": 4962,
"step-1": "<mask token>\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n <mask token>\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n <mask token>\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-2": "<mask token>\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-3": "<mask token>\n\n\nclass ContrastiveBatchScorer(BatchScorer):\n\n def __init__(self, margin, *args, **kwargs):\n self._margin = margin\n self._sess = tf.Session()\n super().__init__(*args, **kwargs)\n <mask token>\n\n\nclass TripletBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(triplet_score(tf.convert_to_tensor(y_true,\n tf.float32), tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32), metric=\n metric))\n\n\nclass FlatPairBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n assert y_pred.shape[0] == y_true.shape[0] * 2\n a, b = np.split(y_pred, 2)\n dist = np.linalg.norm(a - b, axis=1)\n return super().score(y_true, dist, metric)\n\n\nclass ContrastiveOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'contrastive_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return contrastive_score(labels, embeddings, tf.convert_to_tensor(\n self._margin), metric=self._metric)\n\n\nclass TripletOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'triplet_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return triplet_score(labels, embeddings, tf.convert_to_tensor(self.\n _margin), metric=self._metric)\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, 
**kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-4": "<mask token>\n\n\ndef pairwise_distances(embeddings, squared=False):\n \"\"\"Compute the 2D matrix of distances between all the embeddings.\n Args:\n embeddings: tensor of shape (batch_size, embed_dim)\n squared: Boolean. If true, output is the pairwise squared euclidean \n distance matrix. \n If false, output is the pairwise euclidean distance matrix.\n Returns:\n pairwise_distances: tensor of shape (batch_size, batch_size)\n \"\"\"\n dot_product = tf.matmul(embeddings, tf.transpose(embeddings))\n square_norm = tf.diag_part(dot_product)\n distances = tf.expand_dims(square_norm, 1\n ) - 2.0 * dot_product + tf.expand_dims(square_norm, 0)\n distances = tf.maximum(distances, 0.0)\n if not squared:\n mask = tf.to_float(tf.equal(distances, 0.0))\n distances = distances + mask * 1e-16\n distances = tf.sqrt(distances)\n distances = distances * (1.0 - mask)\n return distances\n\n\n<mask token>\n\n\nclass BatchScorer:\n\n def __init__(self):\n self._tp = 0\n self._tn = 0\n self._pcp = 0\n self._pcn = 0\n self._cp = 0\n self._cn = 0\n self._total = 0\n\n def score(self, y_true, y_pred, metric):\n raise NotImplementedError()\n\n def handle(self, y_true, y_pred):\n d = self.score(y_true, y_pred, ['tp', 'tn', 'pcp', 'pcn', 'cp',\n 'cn', 'total'])\n self._tp += d['tp']\n self._tn += d['tn']\n self._pcp += d['pcp']\n self._pcn += d['pcn']\n self._cp += d['cp']\n self._cn += d['cn']\n self._total += d['total']\n\n def result(self, metric):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings('ignore')\n if metric == 'accuracy':\n return (self._tp + self._tn) / self._total\n if metric == 'precision':\n return self._tp / self._pcp\n if metric == 'recall':\n return self._tp / self._cp\n if metric == 'specificity':\n return self._tn / self._cn\n if metric == 'f1':\n precision = self.result('precision')\n recall = self.result('recall')\n return 2 * precision * recall / (precision + recall)\n if metric == 'bacc':\n recall = self.result('recall')\n specificity = self.result('specificity')\n return (recall + specificity) / 2\n raise NotImplementedError()\n\n\nclass ContrastiveBatchScorer(BatchScorer):\n\n def __init__(self, margin, *args, **kwargs):\n self._margin = margin\n self._sess = tf.Session()\n super().__init__(*args, **kwargs)\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(contrastive_score(tf.convert_to_tensor(\n y_true, tf.float32), tf.convert_to_tensor(y_pred, tf.\n float32), tf.convert_to_tensor(self._margin, tf.float32\n ), metric=metric))\n\n\nclass TripletBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(triplet_score(tf.convert_to_tensor(y_true,\n tf.float32), tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32), metric=\n metric))\n\n\nclass FlatPairBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n assert y_pred.shape[0] == y_true.shape[0] * 2\n a, b = np.split(y_pred, 2)\n dist = np.linalg.norm(a - b, axis=1)\n return super().score(y_true, dist, metric)\n\n\nclass ContrastiveOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'contrastive_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return contrastive_score(labels, embeddings, tf.convert_to_tensor(\n self._margin), 
metric=self._metric)\n\n\nclass TripletOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'triplet_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return triplet_score(labels, embeddings, tf.convert_to_tensor(self.\n _margin), metric=self._metric)\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-5": "import tensorflow as tf\nimport numpy as np\n\n\ndef safe_nanmax(x):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings('ignore',\n r'All-NaN (slice|axis) encountered')\n return np.nanmax(x)\n\n\ndef safe_nanargmax(x):\n try:\n return np.nanargmax(x)\n except ValueError:\n return np.nan\n\n\ndef upper_triangular_flat(A):\n ones = tf.ones_like(A)\n mask_a = tf.matrix_band_part(ones, 0, -1)\n mask_b = tf.matrix_band_part(ones, 0, 0)\n mask = tf.cast(mask_a - mask_b, dtype=tf.bool)\n\n return tf.boolean_mask(A, mask)\n\n\ndef pairwise_distances(embeddings, squared=False):\n \"\"\"Compute the 2D matrix of distances between all the embeddings.\n Args:\n embeddings: tensor of shape (batch_size, embed_dim)\n squared: Boolean. If true, output is the pairwise squared euclidean \n distance matrix. \n If false, output is the pairwise euclidean distance matrix.\n Returns:\n pairwise_distances: tensor of shape (batch_size, batch_size)\n \"\"\"\n dot_product = tf.matmul(embeddings, tf.transpose(embeddings))\n square_norm = tf.diag_part(dot_product)\n\n # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2\n # shape (batch_size, batch_size)\n distances = tf.expand_dims(square_norm, 1) - 2.0 * \\\n dot_product + tf.expand_dims(square_norm, 0)\n\n distances = tf.maximum(distances, 0.0)\n\n if not squared:\n mask = tf.to_float(tf.equal(distances, 0.0))\n distances = distances + mask * 1e-16\n distances = tf.sqrt(distances)\n distances = distances * (1.0 - mask)\n\n return distances\n\n\ndef contrastive_score(labels, dist, thresholds, metric=\"accuracy\"):\n d = {}\n if isinstance(metric, list):\n for m in metric:\n d[m] = True\n else:\n d[metric] = True\n res = {}\n\n if \"total\" in d:\n res[\"total\"] = tf.size(labels)\n if \"f1\" in d:\n precision = contrastive_score(\n labels, dist, thresholds, metric=\"precision\")\n recall = contrastive_score(labels, dist, thresholds, metric=\"recall\")\n res[\"f1\"] = 2 * precision * recall / (precision + recall)\n if \"bacc\" in d:\n specificity = contrastive_score(\n labels, dist, thresholds, metric=\"specificity\")\n recall = contrastive_score(labels, dist, thresholds, metric=\"recall\")\n res[\"metric\"] = (specificity + recall) / 2\n\n th = tf.reshape(thresholds, [1, -1])\n dist = tf.reshape(dist, [-1, 1])\n\n labels = tf.cast(tf.reshape(labels, [-1, 1]), tf.int32)\n pred = tf.cast(dist < th, tf.int32)\n\n tp = pred * labels\n tn = (1 - pred) * (1 - labels)\n corr = tp + tn\n\n tp = tf.reduce_sum(tf.cast(tp, tf.float32), axis=0)\n tn = tf.reduce_sum(tf.cast(tn, tf.float32), axis=0)\n pred = tf.cast(pred, tf.float32)\n corr = tf.cast(corr, tf.float32)\n labels = tf.cast(labels, tf.float32)\n\n if \"accuracy\" in d:\n res[\"accuracy\"] = tf.reduce_mean(corr, axis=0)\n if \"precision\" in d:\n res[\"precision\"] = tp / tf.reduce_sum(pred, axis=0)\n if \"recall\" in d:\n res[\"recall\"] = tp / tf.reduce_sum(labels)\n if \"specificity\" in d:\n res[\"specificity\"] = tn / tf.reduce_sum(1 - labels)\n if \"tp\" in d:\n res[\"tp\"] = tp\n if \"tn\" in d:\n res[\"tn\"] = tn\n if \"pcp\" in d:\n res[\"pcp\"] = tf.reduce_sum(pred, axis=0)\n if \"pcn\" in d:\n res[\"pcn\"] = tf.reduce_sum(1 - pred, axis=0)\n if \"cp\" in d:\n res[\"cp\"] = tf.reduce_sum(labels)\n if \"cn\" in d:\n res[\"cn\"] = tf.reduce_sum(1 - labels)\n\n if len(d) != len(res):\n raise NotImplementedError(\"some metrics were not implemented\")\n if not isinstance(metric, list):\n return next(iter(res.values()))\n return res\n\n\ndef triplet_score(labels, embeddings, thresholds, 
metric=\"accuracy\"):\n dist = pairwise_distances(embeddings)\n labels = tf.reshape(labels, [-1, 1])\n pair_labels = tf.cast(tf.equal(labels, tf.transpose(labels)), tf.int32)\n flat_labels = upper_triangular_flat(pair_labels)\n flat_dist = upper_triangular_flat(dist)\n\n return contrastive_score(flat_labels, flat_dist, thresholds, metric=metric)\n\n\nclass BatchScorer:\n def __init__(self):\n self._tp = 0\n self._tn = 0\n self._pcp = 0\n self._pcn = 0\n self._cp = 0\n self._cn = 0\n self._total = 0\n\n def score(self, y_true, y_pred, metric):\n raise NotImplementedError()\n\n def handle(self, y_true, y_pred):\n d = self.score(y_true, y_pred,\n [\"tp\", \"tn\", \"pcp\", \"pcn\", \"cp\", \"cn\", \"total\"])\n self._tp += d[\"tp\"]\n self._tn += d[\"tn\"]\n self._pcp += d[\"pcp\"]\n self._pcn += d[\"pcn\"]\n self._cp += d[\"cp\"]\n self._cn += d[\"cn\"]\n self._total += d[\"total\"]\n\n def result(self, metric):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\")\n\n if metric == \"accuracy\":\n return (self._tp + self._tn) / self._total\n if metric == \"precision\":\n return self._tp / self._pcp\n if metric == \"recall\":\n return self._tp / self._cp\n if metric == \"specificity\":\n return self._tn / self._cn\n if metric == \"f1\":\n precision = self.result(\"precision\")\n recall = self.result(\"recall\")\n return 2 * precision * recall / (precision + recall)\n if metric == \"bacc\":\n recall = self.result(\"recall\")\n specificity = self.result(\"specificity\")\n return (recall + specificity) / 2\n\n raise NotImplementedError()\n\n\nclass ContrastiveBatchScorer(BatchScorer):\n def __init__(self, margin, *args, **kwargs):\n self._margin = margin\n self._sess = tf.Session()\n super().__init__(*args, **kwargs)\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(\n contrastive_score(\n tf.convert_to_tensor(y_true, tf.float32),\n tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32),\n metric=metric))\n\n\nclass TripletBatchScorer(ContrastiveBatchScorer):\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(\n triplet_score(\n tf.convert_to_tensor(y_true, tf.float32),\n tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32),\n metric=metric))\n\n\nclass FlatPairBatchScorer(ContrastiveBatchScorer):\n def score(self, y_true, y_pred, metric):\n assert y_pred.shape[0] == y_true.shape[0] * 2\n a, b = np.split(y_pred, 2)\n dist = np.linalg.norm(a - b, axis=1)\n return super().score(y_true, dist, metric)\n\n\nclass ContrastiveOnKerasMetric:\n def __init__(self, margin, metric=\"accuracy\"):\n self.__name__ = \"contrastive_{}\".format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return contrastive_score(\n labels,\n embeddings,\n tf.convert_to_tensor(self._margin),\n metric=self._metric)\n\n\nclass TripletOnKerasMetric:\n def __init__(self, margin, metric=\"accuracy\"):\n self.__name__ = \"triplet_{}\".format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return triplet_score(\n labels,\n embeddings,\n tf.convert_to_tensor(self._margin),\n metric=self._metric)\n\n\nclass OfflineMetric:\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise 
NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n def __init__(self,\n margin,\n *args,\n id=\"sim\",\n metric=[\"accuracy\"],\n argmax=None,\n **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or (self._margin.ndim == 1 and argmax in metric)\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(\n map(lambda x: \"val_{}_{}\".format(self._id, x), self._metric))\n if self._argmax is not None:\n metrics.append(\"val_{}_argmax_{}\".format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax],\n self._metric)\n return tuple(metrics) + (self._margin[argmax], )\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n def __init__(self, *args, id=\"triplet\", **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n def __init__(self, *args, id=\"contrastive\", **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n def __init__(self, *args, id=\"fpair\", **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-ids": [
18,
20,
32,
39,
46
]
}
|
[
18,
20,
32,
39,
46
] |
<|reserved_special_token_0|>
class NamingConvention:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NamingConvention:
<|reserved_special_token_0|>
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(
__file__)), 'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NamingConvention:
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(
__file__)), 'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import json
class NamingConvention:
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(
__file__)), 'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
<|reserved_special_token_1|>
"""
Created on 02.09.2013
@author: Paul Schweizer
@email: paulschweizer@gmx.net
@brief: Holds all the namingconventions for pandora's box
"""
import os
import json
class NamingConvention():
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
        # end for key, value in namingconventions.items()
# end def __init__
# end class NamingConvention
|
flexible
|
{
"blob_id": "d2a153fffccd4b681eebce823e641e195197cde7",
"index": 54,
"step-1": "<mask token>\n\n\nclass NamingConvention:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NamingConvention:\n <mask token>\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-3": "<mask token>\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-4": "<mask token>\nimport os\nimport json\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-5": "\"\"\"\nCreated on 02.09.2013\n@author: Paul Schweizer\n@email: paulschweizer@gmx.net\n@brief: Holds all the namingconventions for pandora's box\n\"\"\"\n\nimport os\nimport json\n\n\nclass NamingConvention():\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n # end for constant in constants\n # end def __init__\n# end class NamingConvention\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
'''
Problem Description
Given two numbers n1 and n2
1. Find prime numbers between n1 and n2, then
2. Make all possible unique combinations of numbers from the prime
numbers list you found in step 1.
3. From this new list, again find all prime numbers.
4. Find smallest (a) and largest (b) number from the 2nd generated
list, also count of this list.
5. Consider smallest and largest number as the 1st and 2nd number
to generate Fibonacci series respectively till the count
(number of primes in the 2nd list).
6. Print the last number of a Fibonacci series as an output
Constraints
2 <= n1, n2 <= 100
n2 - n1 >= 35
Input Format
One line containing two space separated integers n1 and n2.
Output
Last number of a generated Fibonacci series.
Timeout
1
Test Case
Example 1
Input : 2 40
Output : 13158006689
Explanation :
1st prime list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
Combination of all the primes = [23, 25, 27, 211, 213, 217, 219,
223, 229, 231, 32, 35, 37, 311, 313, 319, 323, 329, 331, 337, 52,
53, 57, 511, 513, 517, 519, 523, 529, 531, 537, 72, 73, 75, 711,
713, 717, 719, 723, 729, 731, 737, 112, 113, 115, 117, 1113, 1117,
1119, 1123, 1129, 1131, 1137, 132, 133, 135, 137, 1311, 1317, 1319,
1323, 1329, 1331, 1337, 172, 173, 175, 177, 1711, 1713, 1719, 1723,
1729, 1731, 1737, 192, 193, 195, 197, 1911, 1913, 1917, 1923, 1929,
1931, 1937, 232, 233, 235, 237, 2311, 2313, 2317, 2319, 2329, 2331,
2337, 292, 293, 295, 297, 2911, 2913, 2917, 2919, 2923, 2931, 2937,
312, 315, 317, 3111, 3113, 3117, 3119, 3123, 3129, 3137, 372, 373,
375, 377, 3711, 3713, 3717, 3719, 3723, 3729, 3731]
2nd prime list=[193, 3137, 197, 2311, 3719, 73, 137, 331, 523,
1931, 719, 337, 211, 23, 1117, 223, 1123, 229, 37, 293, 2917,
1319, 1129, 233, 173, 3119, 113, 53, 373, 311, 313, 1913, 1723,
317]
smallest (a) = 23
largest (b) = 3719
Therefore, the last number of a Fibonacci series i.e. 34th
Fibonacci number in the series that has 23 and 3719 as the first
2 numbers is 13158006689
Example 2
Input : 30 70
Output : 2027041
Explanation
1st prime list=[31, 37, 41, 43, 47, 53, 59, 61, 67]
2nd prime list generated form combination of 1st prime list = [3137,
5953, 5347, 6761, 3761, 4337, 6737, 6131, 3767, 4759, 4153, 3167,
4159, 6143]
smallest prime in 2nd list=3137
largest prime in 2nd list=6761
Therefore, the last number of a Fibonacci series i.e. 14th
Fibonacci number in the series that has 3137 and 6761 as the first
2 numbers is 2027041
'''
# sample test cases passed; private cases originally failed because the
# search range for the 2nd prime list assumed l2 was numerically sorted
# (fixed below with min/max)
# https://www.rookieslab.com/posts/fastest-way-to-check-if-a-number-is-prime-or-not
# sieve of Eratosthenes method
# N = 100
# is_prime = [1]*N
# is_prime[0] = 0
# is_prime[1] = 0
# https://www.geeksforgeeks.org/python-program-to-check-whether-a-number-is-prime-or-not/
def isPrime(n):  # used to check whether a number (e.g. from the 2nd list) is prime
# Corner cases
if (n <= 1) :
return False
if (n <= 3) :
return True
    # Skipping multiples of 2 and 3 here lets the loop below test only
    # divisors of the form 6k - 1 and 6k + 1
if (n % 2 == 0 or n % 3 == 0) :
return False
i = 5
while(i * i <= n) :
if (n % i == 0 or n % (i + 2) == 0) :
return False
i = i + 6
return True
def primeList(n1, n2):
l = []
for n in range(n1, n2+1):
if isPrime(n):
l.append(n)
return l
n1, n2 = map(int, input().split())
l1 = primeList(n1,n2)
# print(l1) - check if first list of prime numbers matches
# combining primes from the 1st list into two-number concatenations
l2 = list()
l = len(l1)
for i in range(l):
for j in range(l):
if i == j:
continue
l2.append(str(l1[i])+str(l1[j]))
# l2 is in nested-loop order, not numeric order, so take the true smallest
# and largest concatenated values as the search range for the 2nd prime list
l3 = primeList(min(map(int, l2)), max(map(int, l2)))
# list of primes covering the range of the second list
l4 = []
for i in l3:
if str(i) in l2:
l4.append(i)
# print(l4) - check if second list of prime numbers matches
x = min(l4)
y = max(l4)
count = len(l4)
# print(x,y,count) - check if smallest, largest prime and count match
for i in range(2,count):
f = x + y
x = y
y = f
print(y)
|
normal
|
{
"blob_id": "fe5050fdf010ce1c4d458b8a52ac92485a7d8cea",
"index": 5706,
"step-1": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2 + 1):\n if isPrime(n):\n l.append(n)\n return l\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2 + 1):\n if isPrime(n):\n l.append(n)\n return l\n\n\n<mask token>\nfor i in range(l):\n for j in range(l):\n if i == j:\n continue\n l2.append(str(l1[i]) + str(l1[j]))\n<mask token>\nfor i in l3:\n if str(i) in l2:\n l4.append(i)\n<mask token>\nfor i in range(2, count):\n f = x + y\n x = y\n y = f\nprint(y)\n",
"step-4": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2 + 1):\n if isPrime(n):\n l.append(n)\n return l\n\n\nn1, n2 = map(int, input().split())\nl1 = primeList(n1, n2)\nl2 = list()\nl = len(l1)\nfor i in range(l):\n for j in range(l):\n if i == j:\n continue\n l2.append(str(l1[i]) + str(l1[j]))\nl3 = primeList(int(l2[0]), int(l2[-1]))\nl4 = []\nfor i in l3:\n if str(i) in l2:\n l4.append(i)\nx = min(l4)\ny = max(l4)\ncount = len(l4)\nfor i in range(2, count):\n f = x + y\n x = y\n y = f\nprint(y)\n",
"step-5": "'''\nProblem Description\nGiven two numbers n1 and n2\n\n1. Find prime numbers between n1 and n2, then\n\n2. Make all possible unique combinations of numbers from the prime \nnumbers list you found in step 1. \n\n3. From this new list, again find all prime numbers.\n\n4. Find smallest (a) and largest (b) number from the 2nd generated \nlist, also count of this list.\n\n5. Consider smallest and largest number as the 1st and 2nd number \nto generate Fibonacci series respectively till the count \n(number of primes in the 2nd list).\n\n6. Print the last number of a Fibonacci series as an output\n\nConstraints\n2 <= n1, n2 <= 100\n\nn2 - n1 >= 35\n\nInput Format\nOne line containing two space separated integers n1 and n2.\n\nOutput\nLast number of a generated Fibonacci series.\n\nTimeout\n1\n\n\nTest Case\nExample 1\nInput : 2 40\nOutput : 13158006689\n\nExplanation :\n\n1st prime list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n\nCombination of all the primes = [23, 25, 27, 211, 213, 217, 219, \n223, 229, 231, 32, 35, 37, 311, 313, 319, 323, 329, 331, 337, 52, \n53, 57, 511, 513, 517, 519, 523, 529, 531, 537, 72, 73, 75, 711, \n713, 717, 719, 723, 729, 731, 737, 112, 113, 115, 117, 1113, 1117, \n1119, 1123, 1129, 1131, 1137, 132, 133, 135, 137, 1311, 1317, 1319, \n1323, 1329, 1331, 1337, 172, 173, 175, 177, 1711, 1713, 1719, 1723, \n1729, 1731, 1737, 192, 193, 195, 197, 1911, 1913, 1917, 1923, 1929, \n1931, 1937, 232, 233, 235, 237, 2311, 2313, 2317, 2319, 2329, 2331, \n2337, 292, 293, 295, 297, 2911, 2913, 2917, 2919, 2923, 2931, 2937, \n312, 315, 317, 3111, 3113, 3117, 3119, 3123, 3129, 3137, 372, 373, \n375, 377, 3711, 3713, 3717, 3719, 3723, 3729, 3731]\n\n2nd prime list=[193, 3137, 197, 2311, 3719, 73, 137, 331, 523, \n1931, 719, 337, 211, 23, 1117, 223, 1123, 229, 37, 293, 2917, \n1319, 1129, 233, 173, 3119, 113, 53, 373, 311, 313, 1913, 1723, \n317]\n\nsmallest (a) = 23\n\nlargest (b) = 3719\n\nTherefore, the last number of a Fibonacci series i.e. 34th \nFibonacci number in the series that has 23 and 3719 as the first \n2 numbers is 13158006689\n\nExample 2\nInput : 30 70\nOutput : 2027041 \n\nExplanation\n\n1st prime list=[31, 37, 41, 43, 47, 53, 59, 61, 67]\n\n2nd prime list generated form combination of 1st prime list = [3137, \n5953, 5347, 6761, 3761, 4337, 6737, 6131, 3767, 4759, 4153, 3167, \n4159, 6143]\n\nsmallest prime in 2nd list=3137\nlargest prime in 2nd list=6761\n\nTherefore, the last number of a Fibonacci series i.e. 
14th \nFibonacci number in the series that has 3137 and 6761 as the first \n2 numbers is 2027041\n'''\n\n# test cases passed , private cases failed\n\n# https://www.rookieslab.com/posts/fastest-way-to-check-if-a-number-is-prime-or-not\n# seive of Eratosthenes method\n\n# N = 100\n# is_prime = [1]*N\n# is_prime[0] = 0\n# is_prime[1] = 0\n\n# https://www.geeksforgeeks.org/python-program-to-check-whether-a-number-is-prime-or-not/\ndef isPrime(n): # use to find if number is prime in 2nd list\n \n # Corner cases \n if (n <= 1) : \n return False\n if (n <= 3) : \n return True\n \n # This is checked so that we can skip \n # middle five numbers in below loop \n if (n % 2 == 0 or n % 3 == 0) : \n return False\n \n i = 5\n while(i * i <= n) : \n if (n % i == 0 or n % (i + 2) == 0) : \n return False\n i = i + 6\n \n return True\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2+1):\n if isPrime(n):\n l.append(n)\n return l\n\nn1, n2 = map(int, input().split())\nl1 = primeList(n1,n2)\n# print(l1) - check if first list of prime numbers matches\n\n#combining\nl2 = list()\nl = len(l1)\nfor i in range(l):\n for j in range(l):\n if i == j:\n continue\n l2.append(str(l1[i])+str(l1[j]))\n\nl3 = primeList(int(l2[0]),int(l2[-1])) \n# list of primes from the second list\nl4 = []\nfor i in l3:\n if str(i) in l2:\n l4.append(i)\n# print(l4) - check if secin list of prime numbers matches\n\nx = min(l4)\ny = max(l4)\ncount = len(l4)\n# print(x,y,count) - check if smallest, largest prime and count match\nfor i in range(2,count):\n f = x + y\n x = y\n y = f\nprint(y)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python3
# encoding: utf-8
"""
@author: ShuoChang
@license: (C) MIT.
@contact: changshuo@bupt.edu.cn
@software: CRNN_STN_SEQ
@file: decoder_base.py
@time: 2019/7/22 17:21
@blog: https://www.zhihu.com/people/chang-shuo-59/activities
"""
from abc import ABCMeta
from abc import abstractmethod


class DecoderBase(object):
    """
    Base model for decoder
    """
    __metaclass__ = ABCMeta

    def __init__(self):
        self._predictor = 'decoder'
        self._label = None
        pass

    @abstractmethod
    def set_label(self, label):
        self._label = label

    @abstractmethod
    def predict(self, input_data):
        pass

    @abstractmethod
    def loss(self, input_data):
        pass

    @abstractmethod
    def sequence_dist(self, input_data):
        pass
|
normal
|
{
"blob_id": "0d8a26ef4077b40e8255d5bb2ce9217b51118780",
"index": 7364,
"step-1": "<mask token>\n\n\nclass DecoderBase(object):\n <mask token>\n <mask token>\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n <mask token>\n\n @abstractmethod\n def loss(self, input_data):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DecoderBase(object):\n <mask token>\n <mask token>\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n <mask token>\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-3": "<mask token>\n\n\nclass DecoderBase(object):\n <mask token>\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n\n @abstractmethod\n def predict(self, input_data):\n pass\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-4": "<mask token>\n\n\nclass DecoderBase(object):\n \"\"\"\n Base model for decoder\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n\n @abstractmethod\n def predict(self, input_data):\n pass\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-5": "#!/usr/bin/python3\n# encoding: utf-8\n\"\"\"\n@author: ShuoChang\n@license: (C) MIT.\n@contact: changshuo@bupt.edu.cn\n@software: CRNN_STN_SEQ\n@file: decoder_base.py\n@time: 2019/7/22 17:21\n@blog: https://www.zhihu.com/people/chang-shuo-59/activities\n\"\"\"\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\n\nclass DecoderBase(object):\n \"\"\"\n Base model for decoder\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n\n @abstractmethod\n def predict(self, input_data):\n pass\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for s1, s2 in zip(A[:-1], A[1:]):
if s1 < s2:
stockNum = g // s1
g += stockNum * (s2 - s1)
print(g)
<|reserved_special_token_1|>
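# Greedy: before every price rise (s1 < s2), put all cash into stock at s1 and
# sell at s2, so every upward move is captured.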
n = int(input())
A = list(map(int, input().split()))
g = 1000
for s1, s2 in zip(A[:-1], A[1:]):
if s1 < s2:
stockNum = g // s1
g += stockNum * (s2 - s1)
print(g)
|
flexible
|
{
"blob_id": "da903409d75ba2a07443317e30bce568444fbca5",
"index": 9956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor s1, s2 in zip(A[:-1], A[1:]):\n if s1 < s2:\n stockNum = g // s1\n g += stockNum * (s2 - s1)\nprint(g)\n",
"step-3": "n = int(input())\nA = list(map(int, input().split()))\ng = 1000\nfor s1, s2 in zip(A[:-1], A[1:]):\n if s1 < s2:\n stockNum = g // s1\n g += stockNum * (s2 - s1)\nprint(g)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python
# coding=UTF-8
import sys
import subprocess
import os
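# Symbolizes native Android stack traces with addr2line and renders one flame
# graph per library (via flamegraph.pl) into an HTML leak report.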
def printReportTail(reportHtmlFile):
reportHtmlFile.write("""
</body>
</html>
""")
def printReportHead(reportHtmlFile):
reportHtmlFile.write("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
""")
def printTitle(reportHtmlFile, title):
reportHtmlFile.write("<h2>" + title + "</h2>\n")
def printText(reportHtmlFile, text):
reportHtmlFile.write("<h4>" + text + "</h4>\n")
def printSVG(reportHtmlFile, svgPath):
reportHtmlFile.write('<embed src="')
reportHtmlFile.write(svgPath)
reportHtmlFile.write('" type="image/svg+xml" />')
def ParseStack(currentPath, ndkPath, stackFile, architecture, symbolsDir):
print "currentPath: " + currentPath
    # locate the addr2line binary matching the target architecture
print "architecture is " + architecture
if architecture == "arm64-v8a":
addr2line = ndkPath + "/toolchains/aarch64-linux-android-4.9/prebuilt/darwin-x86_64/bin/aarch64-linux-android-addr2line"
elif architecture == "armeabi" or architecture == "armeabi-v7a":
addr2line = ndkPath + "/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-addr2line"
else:
print "do not support architecture type for " + architecture
print "only support armeabi/armeabi-v7a/arm64-v8a"
return
print "addr2line path: " + addr2line
    if not os.path.exists(addr2line):
        print "cannot find " + architecture + " addr2line"
        return
    else:
        print "found " + architecture + " addr2line"
reportHtmlPath = os.path.split(stackFile)[0] + "/leakReport.html"
if os.path.exists(reportHtmlPath):
os.unlink(reportHtmlPath)
reportHtmlFile = open(reportHtmlPath, "a")
printReportHead(reportHtmlFile)
    # process the stack file line by line
for line in open(stackFile):
if line.startswith("libName:"):
libName = line.replace("libName:", "").replace('\n', '').replace('\r', '')
printTitle(reportHtmlFile, libName)
libAbsolutePath = os.path.split(stackFile)[0] + "/" + libName
if not os.path.exists(libAbsolutePath):
os.makedirs(libAbsolutePath)
flStackFilePath = libAbsolutePath + "/fl_stack.txt"
flameGraphFile = open(flStackFilePath, "w")
print "find lib: " + libName
elif line.startswith("leakSize:"):
leakSize = line.replace("leakSize:", "").replace('\n', '').replace('\r', '')
leakMsg = "leak size: " + leakSize + "\n"
printText(reportHtmlFile, leakMsg)
print leakMsg
elif line.startswith("stack:"):
stack = line.replace("stack:", "").replace('\n', '').replace('\r', '')
# print "stack: "
for stackElement in stack.split("^"):
if stackElement == "":
continue
dlinfo = stackElement.split("|")
pc = dlinfo[0]
libPath = dlinfo[1]
symbol = dlinfo[2]
# print "pc " + pc + " " + libPath + " " + symbol
symbolFile = symbolsDir + "/" + os.path.split(libPath)[1]
if os.path.exists(symbolFile):
# print "---------"
                    parseCommand = addr2line + " -Ce " + symbolFile + " -f " + pc
                    # print parseCommand
                    # os.system(parseCommand)
                    result = os.popen(parseCommand)
res = result.read()
retraces = res.splitlines()
if len(retraces) != 2 or "?" in retraces[0] or "?" in retraces[1]:
if symbol != "":
method = symbol
codeLine = -1
else:
method = pc
codeLine = -1
else:
method = retraces[0]
codeLine = retraces[1]
# print method
# print codeLine
elif symbol != "":
method = symbol
codeLine = -1
else:
method = pc
codeLine = -1
flameGraphFile.write(method + ";")
flameGraphFile.write(" 1\n")
elif line.replace('\n', '').replace('\r', '') == "libSplit!!!":
            # finished this lib's output: close its stack file and render the flame graph
            print "finish lib " + libName + " parse"
            flameGraphFile.close()
            plExePath = os.path.split(currentPath)[0] + "/flamegraph.pl"
            svgPath = libAbsolutePath + "/" + libName + ".svg"
            command = plExePath + " " + flStackFilePath + " > " + svgPath
            os.system(command)
printSVG(reportHtmlFile, svgPath.replace(os.path.split(libAbsolutePath)[0], "./"))
    printReportTail(reportHtmlFile)
    reportHtmlFile.close()
def main(args):
    if len(args) < 5:
        print("Please provide \"android ndk path\" \"stack file path\" \"arm architecture (armeabi/armeabi-v7a/arm64-v8a)\" \"directory of the symbolized .so files\"")
        return
    ParseStack(args[0], args[1], args[2], args[3], args[4])
if __name__ == "__main__":
main(sys.argv)
|
normal
|
{
"blob_id": "b5cbb73c152dd60e9063d5a19f6182e2264fec6d",
"index": 15,
"step-1": "#!/usr/bin/python\n# coding=UTF-8\n\nimport sys\nimport subprocess\nimport os\n\ndef printReportTail(reportHtmlFile):\n reportHtmlFile.write(\"\"\"\n</body>\n</html>\n\"\"\")\n\ndef printReportHead(reportHtmlFile):\n reportHtmlFile.write(\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Document</title>\n</head>\n<body>\n\"\"\")\n\ndef printTitle(reportHtmlFile, title):\n reportHtmlFile.write(\"<h2>\" + title + \"</h2>\\n\")\n\ndef printText(reportHtmlFile, text):\n reportHtmlFile.write(\"<h4>\" + text + \"</h4>\\n\")\n\ndef printSVG(reportHtmlFile, svgPath):\n reportHtmlFile.write('<embed src=\"')\n reportHtmlFile.write(svgPath)\n reportHtmlFile.write('\" type=\"image/svg+xml\" />')\n\ndef ParseStack(currentPath, ndkPath, stackFile, architecture, symbolsDir):\n print \"currentPath: \" + currentPath\n # 查找addr2line文件\n print \"architecture is \" + architecture\n if architecture == \"arm64-v8a\":\n addr2line = ndkPath + \"/toolchains/aarch64-linux-android-4.9/prebuilt/darwin-x86_64/bin/aarch64-linux-android-addr2line\"\n elif architecture == \"armeabi\" or architecture == \"armeabi-v7a\":\n addr2line = ndkPath + \"/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-addr2line\"\n else:\n print \"do not support architecture type for \" + architecture\n print \"only support armeabi/armeabi-v7a/arm64-v8a\"\n return\n\n print \"addr2line path: \" + addr2line\n if not os.path.exists(addr2line):\n print \"can not find \" + architecture + \" addr2line\"\n else:\n print \"find \" + architecture + \" addr2line\"\n\n reportHtmlPath = os.path.split(stackFile)[0] + \"/leakReport.html\"\n if os.path.exists(reportHtmlPath):\n os.unlink(reportHtmlPath)\n reportHtmlFile = open(reportHtmlPath, \"a\")\n printReportHead(reportHtmlFile)\n\n # 处理stack文件\n for line in open(stackFile): \n\n if line.startswith(\"libName:\"):\n libName = line.replace(\"libName:\", \"\").replace('\\n', '').replace('\\r', '')\n\n printTitle(reportHtmlFile, libName)\n \n libAbsolutePath = os.path.split(stackFile)[0] + \"/\" + libName\n if not os.path.exists(libAbsolutePath):\n os.makedirs(libAbsolutePath)\n flStackFilePath = libAbsolutePath + \"/fl_stack.txt\"\n flameGraphFile = open(flStackFilePath, \"w\")\n print \"find lib: \" + libName\n elif line.startswith(\"leakSize:\"):\n leakSize = line.replace(\"leakSize:\", \"\").replace('\\n', '').replace('\\r', '')\n\n leakMsg = \"leak size: \" + leakSize + \"\\n\"\n\n printText(reportHtmlFile, leakMsg)\n\n print leakMsg\n elif line.startswith(\"stack:\"):\n stack = line.replace(\"stack:\", \"\").replace('\\n', '').replace('\\r', '')\n # print \"stack: \"\n for stackElement in stack.split(\"^\"):\n if stackElement == \"\":\n continue\n \n dlinfo = stackElement.split(\"|\")\n pc = dlinfo[0]\n libPath = dlinfo[1]\n symbol = dlinfo[2]\n # print \"pc \" + pc + \" \" + libPath + \" \" + symbol\n symbolFile = symbolsDir + \"/\" + os.path.split(libPath)[1]\n if os.path.exists(symbolFile):\n # print \"---------\"\n parseCommend = addr2line + \" -Ce \" + symbolFile + \" -f \" + pc\n # print parseCommend\n # os.system(parseCommend)\n result = os.popen(parseCommend) \n res = result.read() \n retraces = res.splitlines()\n if len(retraces) != 2 or \"?\" in retraces[0] or \"?\" in retraces[1]:\n if symbol != \"\":\n method = symbol\n codeLine = -1\n else:\n method = pc\n codeLine = -1\n else:\n method = retraces[0]\n codeLine = retraces[1]\n # 
print method\n # print codeLine\n elif symbol != \"\":\n method = symbol\n codeLine = -1\n else:\n method = pc\n codeLine = -1\n \n flameGraphFile.write(method + \";\")\n flameGraphFile.write(\" 1\\n\")\n elif line.replace('\\n', '').replace('\\r', '') == \"libSplit!!!\":\n # 结束了一个lib的输出\n print \"finish lib \" + libName + \" parse\"\n plExePath = os.path.split(currentPath)[0] + \"/flamegraph.pl\"\n svgPath = libAbsolutePath + \"/\" + libName + \".svg\"\n commend = plExePath + \" \" + flStackFilePath + \" > \" + svgPath\n os.system(commend)\n\n printSVG(reportHtmlFile, svgPath.replace(os.path.split(libAbsolutePath)[0], \"./\"))\n\n printReportTail(reportHtmlFile)\n\ndef main(args):\n if 4 > len(args):\n print(\"请输入\\\"android ndk路径\\\" \\\"stack文件路径\\\" \\\"arm架构(armeabi/armeabi-v7a/arm64-v8a)\\\" \\\"带符号表so所在目录\\\"\")\n return\n ParseStack(args[0], args[1], args[2], args[3], args[4])\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import ROOT
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from TreeProducer import *
from TreeProducerCommon import *
from CorrectionTools.PileupWeightTool import *
from CorrectionTools.BTaggingTool import BTagWeightTool, BTagWPs
from CorrectionTools.MuonSFs import *
from CorrectionTools.ElectronSFs import *
from CorrectionTools.RecoilCorrectionTool import getTTptWeight, getTTPt
from CorrectionTools.DYCorrection import *
import struct
import numpy as np
class LLProducer(Module):
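    """Select boosted V(ll/nunu) + H(bb) candidates from NanoAOD events and fill
    the output tree together with trigger, lepton, b-tagging and pileup weights."""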
def __init__(self, name, DataType, filelist, **kwargs):
self.name = name
self.out = TreeProducer(name)
self.sample = filelist
if DataType=='data':
self.isData = True
self.isMC = False
else:
self.isData = False
self.isMC = True
self.year = kwargs.get('year', 2017 )
self.tes = kwargs.get('tes', 1.0 )
self.ltf = kwargs.get('ltf', 1.0 )
self.jtf = kwargs.get('jtf', 1.0 )
year = self.year
self.filter = getMETFilters(year,self.isData)
if not self.isData:
self.muSFs = MuonSFs(year=year)
self.elSFs = ElectronSFs(year=year)
self.puTool = PileupWeightTool(year =year)
self.btagToolAK8_deep = BTagWeightTool('DeepCSV','AK8','loose',sigma='central',channel='ll',year=year)
self.btagToolAK8_deep_up = BTagWeightTool('DeepCSV','AK8','loose',sigma='up',channel='ll',year=year)
self.btagToolAK8_deep_down = BTagWeightTool('DeepCSV','AK8','loose',sigma='down',channel='ll',year=year)
self.btagToolAK4_deep = BTagWeightTool('DeepCSV','AK4','loose',sigma='central',channel='ll',year=year)
self.btagToolAK4_deep_up = BTagWeightTool('DeepCSV','AK4','loose',sigma='up',channel='ll',year=year)
self.btagToolAK4_deep_down = BTagWeightTool('DeepCSV','AK4','loose',sigma='down',channel='ll',year=year)
if 'DYJetsToLL' in self.sample[0]:
self.DYCorr = DYCorrection('DYJetsToLL')
elif 'ZJetsToNuNu' in self.sample[0]:
self.DYCorr = DYCorrection('ZJetsToNuNu')
elif 'WJetsToLNu' in self.sample[0]:
self.DYCorr = DYCorrection('WJetsToLNu')
self.runJEC = False
JEC_samples = ['Zprime','WWTo','WZTo','ZZTo','GluGluHToBB','ZH_HToBB','Wplus','Wminus']
for JEC_sample in JEC_samples:
if self.sample[0].find(JEC_sample)>0:
self.runJEC = True
def beginJob(self):
pass
def endJob(self):
if not self.isData:
self.btagToolAK8_deep.setDirectory(self.out.outputfile,'AK8btag_deep')
self.btagToolAK4_deep.setDirectory(self.out.outputfile,'AK4btag_deep')
self.out.outputfile.Write()
self.out.outputfile.Close()
def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
pass
def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
pass
def fillBranches(self,event):
self.out.isMC[0] = self.isMC
self.out.is2016[0] = self.is2016
self.out.is2017[0] = self.is2017
self.out.is2018[0] = self.is2018
self.out.EventNumber[0] = event.event
self.out.LumiNumber[0] = event.luminosityBlock
self.out.RunNumber[0] = event.run
self.out.EventWeight[0] = self.EventWeight
self.out.TopWeight[0] = self.TopWeight
self.out.BTagAK8Weight[0] = self.BTagAK8Weight
self.out.BTagAK4Weight[0] = self.BTagAK4Weight
self.out.BTagAK8Weight_deep[0] = self.BTagAK8Weight_deep
self.out.BTagAK8Weight_deep_up[0] = self.BTagAK8Weight_deep_up
self.out.BTagAK8Weight_deep_down[0] = self.BTagAK8Weight_deep_down
self.out.BTagAK4Weight_deep[0] = self.BTagAK4Weight_deep
self.out.BTagAK4Weight_deep_up[0] = self.BTagAK4Weight_deep_up
self.out.BTagAK4Weight_deep_down[0] = self.BTagAK4Weight_deep_down
self.out.BBTagWeight[0] = self.BBTagWeight
self.out.GenWeight[0] = self.GenWeight
self.out.PUWeight[0] = self.PUWeight
self.out.LeptonWeight[0] = self.LeptonWeight
self.out.LeptonWeightUp[0] = self.LeptonWeightUp
self.out.LeptonWeightDown[0] = self.LeptonWeightDown
self.out.TriggerWeight[0] = self.TriggerWeight
self.out.TriggerWeightUp[0] = self.TriggerWeightUp
self.out.TriggerWeightDown[0] = self.TriggerWeightDown
self.out.QCDNLO_Corr[0] = self.QCDNLO_Corr
self.out.QCDNNLO_Corr[0] = self.QCDNNLO_Corr
self.out.EWKNLO_Corr[0] = self.EWKNLO_Corr
self.out.isZtoNN[0] = self.isZtoNN
self.out.isZtoEE[0] = self.isZtoEE
self.out.isZtoMM[0] = self.isZtoMM
self.out.isTtoEM[0] = self.isTtoEM
self.out.isBoosted4B[0] = self.isBoosted4B
self.out.isHtobb[0] = self.isHtobb
self.out.isHtobb_ml[0] = self.isHtobb_ml
self.out.isMaxBTag_loose[0] = self.isMaxBTag_loose
self.out.isMaxBTag_medium[0] = self.isMaxBTag_medium
self.out.isMaxBTag_tight[0] = self.isMaxBTag_tight
self.out.isVBF[0] = self.isVBF
self.out.nPV[0] = event.PV_npvsGood
self.out.nTaus[0] = self.nTaus
self.out.nElectrons[0] = self.nElectrons
self.out.nMuons[0] = self.nMuons
self.out.nJets[0] = self.nJetsNoFatJet
self.out.nFatJets[0] = self.nFatJets
self.out.DPhi[0] = self.DPhi
self.out.DEta[0] = self.VHDEta
self.out.MinDPhi[0] = self.MinJetMetDPhi
self.out.MaxBTag[0] = self.MaxJetNoFatJetBTag
self.out.BtagDeepB[0] = self.BtagDeepB
self.out.DeepTagMD_H4qvsQCD[0] = self.DeepTagMD_H4qvsQCD
self.out.DeepTagMD_HbbvsQCD[0] = self.DeepTagMD_HbbvsQCD
self.out.DeepTagMD_ZHbbvsQCD[0] = self.DeepTagMD_ZHbbvsQCD
self.out.DeepTagMD_ZbbvsQCD[0] = self.DeepTagMD_ZbbvsQCD
self.out.DeepTagMD_bbvsLight[0] = self.DeepTagMD_bbvsLight
self.out.DeepTagMD_WvsQCD[0] = self.DeepTagMD_WvsQCD
self.out.DeepTagMD_ZvsQCD[0] = self.DeepTagMD_ZvsQCD
self.out.Mu1_pt[0] = self.Mu1_pt
self.out.Mu1_eta[0] = self.Mu1_eta
self.out.Mu1_phi[0] = self.Mu1_phi
self.out.Mu1_mass[0] = self.Mu1_mass
self.out.Mu1_pfIsoId[0] = self.Mu1_pfIsoId
self.out.Mu1_relIso[0] = self.Mu1_relIso
self.out.Mu1_highPtId[0] = self.Mu1_highPtId
self.out.Mu2_pt[0] = self.Mu2_pt
self.out.Mu2_eta[0] = self.Mu2_eta
self.out.Mu2_phi[0] = self.Mu2_phi
self.out.Mu2_mass[0] = self.Mu2_mass
self.out.Mu2_pfIsoId[0] = self.Mu2_pfIsoId
self.out.Mu2_relIso[0] = self.Mu2_relIso
self.out.Mu2_highPtId[0] = self.Mu2_highPtId
self.out.Ele1_pt[0] = self.Ele1_pt
self.out.Ele1_eta[0] = self.Ele1_eta
self.out.Ele1_phi[0] = self.Ele1_phi
self.out.Ele1_mass[0] = self.Ele1_mass
self.out.Ele2_pt[0] = self.Ele2_pt
self.out.Ele2_eta[0] = self.Ele2_eta
self.out.Ele2_phi[0] = self.Ele2_phi
self.out.Ele2_mass[0] = self.Ele2_mass
self.out.Ele_HEM15_16[0] = self.Ele_HEM15_16
self.out.Jet1_VBF_pt[0] = self.Jet1_VBF_pt
self.out.Jet1_VBF_eta[0] = self.Jet1_VBF_eta
self.out.Jet1_VBF_phi[0] = self.Jet1_VBF_phi
self.out.Jet1_VBF_mass[0] = self.Jet1_VBF_mass
self.out.Jet2_VBF_pt[0] = self.Jet2_VBF_pt
self.out.Jet2_VBF_eta[0] = self.Jet2_VBF_eta
self.out.Jet2_VBF_phi[0] = self.Jet2_VBF_phi
self.out.Jet2_VBF_mass[0] = self.Jet2_VBF_mass
self.out.dijet_VBF_mass[0] = self.dijet_VBF_mass
self.out.deltaR_VBF[0] = self.deltaR_VBF
self.out.deltaR_HVBFjet1[0] = self.deltaR_HVBFjet1
self.out.deltaR_HVBFjet2[0] = self.deltaR_HVBFjet2
self.out.MET[0] = event.PuppiMET_pt
self.out.MET_chs[0] = event.MET_pt
self.out.HT_HEM15_16[0] = self.HT_HEM15_16
self.out.LHEScaleWeight = self.LHEScaleWeight
self.out.LHEPdfWeight = self.LHEPdfWeight
self.out.LHEWeight_originalXWGTUP[0]= self.LHEWeight_originalXWGTUP
self.out.PrefireWeight[0] = self.PrefireWeight
self.out.PrefireWeightUp[0] = self.PrefireWeightUp
self.out.PrefireWeightDown[0] = self.PrefireWeightDown
self.out.HT[0] = self.HT
self.out.H_pt[0] = self.H_pt
self.out.H_eta[0] = self.H_eta
self.out.H_phi[0] = self.H_phi
self.out.H_mass[0] = self.H_mass
self.out.H_M[0] = self.H_M
self.out.H_tau21[0] = self.H_tau21
self.out.H_tau41[0] = self.H_tau41
self.out.H_tau42[0] = self.H_tau42
self.out.H_tau31[0] = self.H_tau31
self.out.H_tau32[0] = self.H_tau32
self.out.H_ddt[0] = self.H_ddt
self.out.H_csv1[0] = self.H_csv1
self.out.H_csv2[0] = self.H_csv2
self.out.H_deepcsv1[0] = self.H_deepcsv1
self.out.H_deepcsv2[0] = self.H_deepcsv2
self.out.H_dbt[0] = self.H_dbt
self.out.H_hadronflavour[0] = self.H_hadronflavour
self.out.H_partonflavour[0] = self.H_partonflavour
self.out.H_chf[0] = self.H_chf
self.out.H_nhf[0] = self.H_nhf
self.out.V_pt[0] = self.V_pt
self.out.V_eta[0] = self.V_eta
self.out.V_phi[0] = self.V_phi
self.out.V_mass[0] = self.V_mass
self.out.VH_deltaR[0] = self.VH_deltaR
self.out.X_pt[0] = self.X_pt
self.out.X_eta[0] = self.X_eta
self.out.X_phi[0] = self.X_phi
self.out.X_mass[0] = self.X_mass
self.out.X_mass_chs[0] = self.X_mass_chs
self.out.X_mass_nom[0] = self.X_mass_nom
self.out.X_mass_jesUp[0] = self.X_mass_jesUp
self.out.X_mass_jesDown[0] = self.X_mass_jesDown
self.out.X_mass_jerUp[0] = self.X_mass_jerUp
self.out.X_mass_jerDown[0] = self.X_mass_jerDown
self.out.X_mass_MET_nom[0] = self.X_mass_MET_nom
self.out.X_mass_MET_jesUp[0] = self.X_mass_MET_jesUp
self.out.X_mass_MET_jesDown[0] = self.X_mass_MET_jesDown
self.out.X_mass_MET_jerUp[0] = self.X_mass_MET_jerUp
self.out.X_mass_MET_jerDown[0] = self.X_mass_MET_jerDown
self.out.H_mass_nom[0] = self.H_mass_nom
self.out.H_mass_jmsUp[0] = self.H_mass_jmsUp
self.out.H_mass_jmsDown[0] = self.H_mass_jmsDown
self.out.H_mass_jmrUp[0] = self.H_mass_jmrUp
self.out.H_mass_jmrDown[0] = self.H_mass_jmrDown
self.out.tree.Fill()
def analyze(self, event):
"""process event, return True (go to next module) or False (fail, go to next event)"""
##### set variables ####
self.nElectrons = 0
self.nMuons = 0
self.nTaus = 0
self.nFatJets = 0
self.EventWeight = 1.
self.TopWeight = 1.
self.BTagAK8Weight = 1.
self.BTagAK4Weight = 1.
self.BTagAK8Weight_deep = 1.
self.BTagAK8Weight_deep_up = 1.
self.BTagAK8Weight_deep_down = 1.
self.BTagAK4Weight_deep = 1.
self.BTagAK4Weight_deep_up = 1.
self.BTagAK4Weight_deep_down = 1.
self.BBTagWeight = 1.
self.GenWeight = 1.
self.PUWeight = 1.
self.LeptonWeight = 1.
self.LeptonWeightUp = 1.
self.LeptonWeightDown = 1.
self.TriggerWeight = 1.
self.TriggerWeightUp = 1.
self.TriggerWeightDown = 1.
self.isZtoMM = False
self.isZtoEE = False
self.isZtoNN = False
self.isTtoEM = False
self.isBoosted4B = False
self.isHtobb = False
self.isHtobb_ml = False
self.isMaxBTag_loose = False
self.isMaxBTag_medium = False
self.isMaxBTag_tight = False
self.isVBF = False
self.is2016 = False
self.is2017 = False
self.is2018 = False
self.nTaus = 0
self.nJetsNoFatJet = 0
self.H_partonflavour = -1.
self.H_hadronflavour = -1.
self.DPhi = -1.
self.VHDEta = -1.
self.MinJetMetDPhi = 10.
self.MaxJetNoFatJetBTag = -1.
self.BtagDeepB = -1.
self.DeepTagMD_H4qvsQCD = -1.
self.DeepTagMD_HbbvsQCD = -1.
self.DeepTagMD_ZHbbvsQCD = -1.
self.DeepTagMD_ZbbvsQCD = -1.
self.DeepTagMD_bbvsLight = -1.
self.DeepTagMD_WvsQCD = -1.
self.DeepTagMD_ZvsQCD = -1.
self.Mu1_pt = -1.
self.Mu1_eta = -1.
self.Mu1_phi = -1.
self.Mu1_mass = -1.
self.Mu1_pfIsoId = -1.
self.Mu1_relIso = -1.
self.Mu1_highPtId = -1.
self.Mu2_pt = -1.
self.Mu2_eta = -1.
self.Mu2_phi = -1.
self.Mu2_mass = -1.
self.Mu2_pfIsoId = -1.
self.Mu2_relIso = -1.
self.Mu2_highPtId = -1.
self.Ele1_pt = -1.
self.Ele1_eta = -1.
self.Ele1_phi = -1.
self.Ele1_mass = -1.
self.Ele2_pt = -1.
self.Ele2_eta = -1.
self.Ele2_phi = -1.
self.Ele2_mass = -1.
self.Ele_HEM15_16 = -1.
self.HT_HEM15_16 = -1.
self.HT = 0.
self.LHEScaleWeight = -1.
self.LHEPdfWeight = -1.
self.LHEWeight_originalXWGTUP = -1.
self.PrefireWeight = 1.
self.PrefireWeightUp = 1.
self.PrefireWeightDown = 1.
self.QCDNLO_Corr = 1.
self.QCDNNLO_Corr = 1.
self.EWKNLO_Corr = 1.
self.Jet1_VBF_pt = -1.
self.Jet1_VBF_eta = -1.
self.Jet1_VBF_phi = -1.
self.Jet1_VBF_mass = -1.
self.Jet2_VBF_pt = -1.
self.Jet2_VBF_eta = -1.
self.Jet2_VBF_phi = -1.
self.Jet2_VBF_mass = -1.
self.dijet_VBF_mass = -1.
self.deltaR_VBF = -1.
self.deltaR_HVBFjet1 = -1.
self.deltaR_HVBFjet2 = -1.
self.H_pt = -1.
self.H_eta = -1.
self.H_phi = -1.
self.H_mass = -1.
self.H_M = -1.
self.H_tau21 = -1.
self.H_tau41 = -1.
self.H_tau42 = -1.
self.H_tau31 = -1.
self.H_tau32 = -1.
self.H_ddt = -1.
self.H_csv1 = -1.
self.H_csv2 = -1.
self.H_deepcsv1 = -1.
self.H_deepcsv2 = -1.
self.H_dbt = -1.
self.H_chf = -1.
self.H_nhf = -1.
self.V_pt = -1.
self.V_eta = -1.
self.V_phi = -1.
self.V_mass = -1.
self.VH_deltaR = -1.
self.X_pt = -1.
self.X_eta = -1.
self.X_phi = -1.
self.X_mass = -1.
self.X_mass_chs = -1.
self.X_mass_nom = -1.
self.X_mass_jesUp = -1.
self.X_mass_jesDown = -1.
self.X_mass_jerUp = -1.
self.X_mass_jerDown = -1.
self.X_mass_MET_nom = -1.
self.X_mass_MET_jesUp = -1.
self.X_mass_MET_jesDown = -1.
self.X_mass_MET_jerUp = -1.
self.X_mass_MET_jerDown = -1.
self.H_mass_nom = -1.
self.H_mass_jmsUp = -1.
self.H_mass_jmsDown = -1.
self.H_mass_jmrUp = -1.
self.H_mass_jmrDown = -1.
eecutflow_list = []
mmcutflow_list = []
nncutflow_list = []
idx_electrons = []
idx_loose_electrons = []
idx_muons = []
idx_loose_muons = []
idx_fatjet = []
idx_jet = []
idx_jet_vbf = []
electrons_tlv_list = []
loose_electrons_tlv_list = []
muons_tlv_list = []
loose_muons_tlv_list = []
fatjet_tlv_list = []
jet_tlv_list = []
jet_tlv_list_vbf = []
fatjet_tau21_list = []
fatjet_tau41_list = []
fatjet_tau42_list = []
fatjet_tau31_list = []
fatjet_tau32_list = []
V = ROOT.TLorentzVector()
H = ROOT.TLorentzVector()
X = ROOT.TLorentzVector()
V_chs = ROOT.TLorentzVector()
######### cuts #########
elec1_pt_cut = 55.
elec2_pt_cut = 20.
elec_pt_cut = 10.
elec_eta_cut = 2.5
muon1_pt_cut = 55.
muon2_pt_cut = 20.
muon_pt_cut = 10.
muon_eta_cut = 2.4
tau_pt_cut = 18.
tau_eta_cut = 2.3
ak4_pt_cut = 30.
ak4_eta_cut = 2.4
fatjet_pt_cut = 200.
fatjet_eta_cut = 2.4
met_pt_cut = 250.
v_pt_cut = 200.
tau21_lowercut = 0.35
tau21_uppercut = 0.75
j_mass_lowercut = 30.
j_mass_uppercut = 250.
v_mass_lowercut = 65.
v_mass_intercut = 85.
v_mass_uppercut = 105.
h_mass_lowercut = 105.
h_mass_uppercut = 135.
x_mass_lowercut = 750.
xt_mass_lowercut = 650.
xjj_mass_lowercut = 950.
#### flag for year #######
if self.year == 2016:
self.is2016 = True
elif self.year == 2017:
self.is2017 = True
elif self.year == 2018:
self.is2018 = True
######### triggers #########
if self.year == 2016:
try:
trigger_SingleMu = any([event.HLT_Mu50,
event.HLT_TkMu50])
except:
trigger_SingleMu = event.HLT_Mu50
trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT
trigger_SingleIsoEle = event.HLT_Ele27_WPTight_Gsf
trigger_SinglePhoton = event.HLT_Photon175
trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,
event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,
event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])
trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight,
event.HLT_PFMET120_PFMHT120_IDTight])
trigger_MET = any([event.HLT_PFMET170_NotCleaned,
event.HLT_PFMET170_HBHECleaned])
elif self.year == 2017:
try:
trigger_SingleMu = any([event.HLT_Mu50,
event.HLT_TkMu100,
event.HLT_OldMu100])
except:
trigger_SingleMu = event.HLT_Mu50
try:
trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT
except:
trigger_SingleEle = None
trigger_SingleIsoEle = event.HLT_Ele35_WPTight_Gsf
trigger_SinglePhoton = event.HLT_Photon200
try:
trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,
event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,
event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,
event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,
event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])
except:
trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,
event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,
event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])
trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight,
event.HLT_PFMET120_PFMHT120_IDTight,
event.HLT_PFMET130_PFMHT130_IDTight,
event.HLT_PFMET140_PFMHT140_IDTight,
event.HLT_PFMETTypeOne110_PFMHT110_IDTight,
event.HLT_PFMETTypeOne120_PFMHT120_IDTight,
event.HLT_PFMETTypeOne130_PFMHT130_IDTight,
event.HLT_PFMETTypeOne140_PFMHT140_IDTight])
try:
trigger_MET = any([event.HLT_PFMET200_NotCleaned,
event.HLT_PFMET200_HBHECleaned,
event.HLT_PFMET200_HBHE_BeamHaloCleaned,
event.HLT_PFMET250_HBHECleaned])
except:
trigger_MET = None
elif self.year == 2018:
trigger_SingleMu = any([event.HLT_Mu50,
event.HLT_TkMu100,
event.HLT_OldMu100])
trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT
trigger_SingleIsoEle = event.HLT_Ele32_WPTight_Gsf
trigger_SinglePhoton = event.HLT_Photon200
trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,
event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,
event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,
event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,
event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])
trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight,
event.HLT_PFMET120_PFMHT120_IDTight,
event.HLT_PFMET130_PFMHT130_IDTight,
event.HLT_PFMET140_PFMHT140_IDTight,
event.HLT_PFMETTypeOne110_PFMHT110_IDTight,
event.HLT_PFMETTypeOne120_PFMHT120_IDTight,
event.HLT_PFMETTypeOne130_PFMHT130_IDTight,
event.HLT_PFMETTypeOne140_PFMHT140_IDTight])
trigger_MET = any([event.HLT_PFMET200_NotCleaned,
event.HLT_PFMET200_HBHECleaned,
event.HLT_PFMET200_HBHE_BeamHaloCleaned,
event.HLT_PFMET250_HBHECleaned])
########## Gen Weight #########
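        # keep only the sign of the generator weight; its magnitude is absorbed
        # into the cross-section normalization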
if self.isMC:
self.GenWeight = -1. if event.genWeight < 0 else 1.
self.PUWeight = self.puTool.getWeight(event.Pileup_nTrueInt)
self.EventWeight *= self.GenWeight
self.EventWeight *= self.PUWeight
for i,weight in enumerate(event.LHEScaleWeight):
self.out.LHEScaleWeight_hist.Fill(i,weight)
for j,weight in enumerate(event.LHEPdfWeight):
self.out.LHEPdfWeight_hist.Fill(j,weight)
self.LHEScaleWeight = event.LHEScaleWeight
self.LHEPdfWeight = event.LHEPdfWeight
self.LHEWeight_originalXWGTUP = event.LHEWeight_originalXWGTUP
self.out.events.Fill(0.,self.GenWeight)
self.out.original.Fill(0.,event.LHEWeight_originalXWGTUP)
if self.year == 2016 or self.year == 2017:
self.PrefireWeight = event.PrefireWeight
self.PrefireWeightUp = event.PrefireWeight_Up
self.PrefireWeightDown = event.PrefireWeight_Down
if self.isData and event.PV_npvs == 0:
return False
if not self.isData:
self.out.pileup.Fill(event.Pileup_nTrueInt)
if event.Pileup_nTrueInt == 0:
return False
########### FatJet #########
for ifatjet in range(event.nFatJet):
fatjet_pt = event.FatJet_pt[ifatjet]
fatjet_eta = event.FatJet_eta[ifatjet]
fatjet_phi = event.FatJet_phi[ifatjet]
fatjet_mass = event.FatJet_mass[ifatjet]
fatjet_jetid = event.FatJet_jetId[ifatjet]
fatjet_tlv = ROOT.TLorentzVector()
fatjet_tlv.SetPtEtaPhiM(fatjet_pt, fatjet_eta, fatjet_phi, fatjet_mass)
if fatjet_pt > fatjet_pt_cut and abs(fatjet_eta) < fatjet_eta_cut:
fatjet_tlv_list.append(fatjet_tlv)
idx_fatjet.append(ifatjet)
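                # N-subjettiness ratios tau_M/tau_N discriminate N-prong jet substructure;
                # guard against zero denominators before dividing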
if event.FatJet_tau1[ifatjet]==0:
fatjet_tau21_list.append(0)
fatjet_tau41_list.append(0)
fatjet_tau31_list.append(0)
else:
fatjet_tau21_list.append(event.FatJet_tau2[ifatjet]/event.FatJet_tau1[ifatjet])
fatjet_tau41_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau1[ifatjet])
fatjet_tau31_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau1[ifatjet])
if event.FatJet_tau2[ifatjet]==0:
fatjet_tau42_list.append(0)
fatjet_tau32_list.append(0)
else:
fatjet_tau42_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau2[ifatjet])
fatjet_tau32_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau2[ifatjet])
self.nFatJets = len(fatjet_tlv_list)
#stop if no suitable Fatjet
if len(fatjet_tlv_list) == 0:
return False
########### electrons ##########
for ielectron in range(event.nElectron):
electron_pt = event.Electron_pt[ielectron]
electron_eta = event.Electron_eta[ielectron]
electron_phi = event.Electron_phi[ielectron]
electron_mass = event.Electron_mass[ielectron]
electron_tlv = ROOT.TLorentzVector()
electron_tlv.SetPtEtaPhiM(electron_pt,electron_eta,electron_phi,electron_mass)
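            # HEM15/16: eta-phi region affected by the 2018 HE calorimeter failure;
            # accumulate the pT of electrons falling inside it for later vetoing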
if electron_eta > -2.5 and electron_eta < -1.479 and electron_phi > -1.55 and electron_phi < -0.9:
if self.Ele_HEM15_16 == -1.:
self.Ele_HEM15_16 = 0.
self.Ele_HEM15_16 += electron_pt
if electron_pt > elec_pt_cut and abs(electron_eta) < elec_eta_cut:
idx_electrons.append(ielectron)
electrons_tlv_list.append(electron_tlv)
if event.Electron_cutBased[ielectron] >= 2:
idx_loose_electrons.append(ielectron)
loose_electrons_tlv_list.append(electron_tlv)
self.nElectrons = len(loose_electrons_tlv_list)
########### muons #########
for imuon in range(event.nMuon):
muon_pt = event.Muon_pt[imuon]
muon_eta = event.Muon_eta[imuon]
muon_phi = event.Muon_phi[imuon]
muon_mass = event.Muon_mass[imuon]
muon_tlv = ROOT.TLorentzVector()
muon_tlv.SetPtEtaPhiM(muon_pt, muon_eta, muon_phi, muon_mass)
if muon_pt > muon_pt_cut and abs(muon_eta) < muon_eta_cut:
idx_muons.append(imuon)
muons_tlv_list.append(muon_tlv)
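                # Muon_pfIsoId is stored as an unsigned byte in NanoAOD, hence the
                # struct.unpack; a value >= 2 corresponds to loose PF isolation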
if event.Muon_isPFcand[imuon] and struct.unpack('B',event.Muon_pfIsoId[imuon])[0]>=2 and (event.Muon_isGlobal[imuon] or event.Muon_isTracker[imuon]):
idx_loose_muons.append(imuon)
loose_muons_tlv_list.append(muon_tlv)
self.nMuons = len(loose_muons_tlv_list)
############ taus #########
for itau in range(event.nTau):
tau_pt = event.Tau_pt[itau]
tau_eta = event.Tau_eta[itau]
tau_phi = event.Tau_phi[itau]
tau_mass = event.Tau_mass[itau]
tau_tlv = ROOT.TLorentzVector()
tau_tlv.SetPtEtaPhiM(tau_pt, tau_eta, tau_phi, tau_mass)
if tau_pt > tau_pt_cut and abs(tau_eta) < tau_eta_cut:
cleanTau = True
for loose_electrons_tlv in loose_electrons_tlv_list:
if loose_electrons_tlv.DeltaR(tau_tlv) < 0.4:
cleanTau = False
for loose_muons_tlv in loose_muons_tlv_list:
if loose_muons_tlv.DeltaR(tau_tlv) < 0.4:
cleanTau = False
if cleanTau:
self.nTaus += 1
############ MET ##########
METx = 0.
METy = 0.
MET_tlv = ROOT.TLorentzVector()
MET_tlv.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi, event.PuppiMET_pt)
############ TTbar pT reweighting ########
if self.isMC and 'TT' in self.sample[0]:
Top1_pt, Top2_pt = getTTPt(event)
self.TopWeight = getTTptWeight(Top1_pt, Top2_pt)
############ ZtoEE ############
self.out.eecutflow.Fill(0.,self.EventWeight)
eecutflow_list.append(self.EventWeight)
maxZpt = -1.
Z_pt = -1.
Z_m = -1.
goodelectronpair = False
for i in idx_electrons:
for j in idx_electrons:
if i==j or event.Electron_charge[i] == event.Electron_charge[j]:
continue
eli_tlv = ROOT.TLorentzVector()
eli_tlv.SetPtEtaPhiM(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i],event.Electron_mass[i])
eli_v = ROOT.TVector3()
eli_v.SetPtEtaPhi(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i])
elj_tlv = ROOT.TLorentzVector()
elj_tlv.SetPtEtaPhiM(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j],event.Electron_mass[j])
elj_v = ROOT.TVector3()
elj_v.SetPtEtaPhi(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j])
diel = eli_tlv + elj_tlv
Z_pt = diel.Pt()
Z_m = diel.M()
if Z_m > 70. and Z_m < 110. and Z_pt > maxZpt:
maxZpt = Z_pt
if eli_tlv.Pt() > elj_tlv.Pt():
el1 = i
el2 = j
el1_tlv = eli_tlv
el2_tlv = elj_tlv
el1_v = eli_v
el2_v = elj_v
else:
el1 = j
el2 = i
el1_tlv = elj_tlv
el2_tlv = eli_tlv
el1_v = elj_v
el2_v = eli_v
goodelectronpair = True
if goodelectronpair:
self.out.eecutflow.Fill(1.,self.EventWeight)
eecutflow_list.append(self.EventWeight)
if el1_tlv.Pt() > elec1_pt_cut and el2_tlv.Pt() > elec2_pt_cut:
self.out.eecutflow.Fill(2.,self.EventWeight)
eecutflow_list.append(self.EventWeight)
if event.Electron_cutBased[el1] >= 2 and event.Electron_cutBased[el2] >= 2:
self.out.eecutflow.Fill(3.,self.EventWeight)
eecutflow_list.append(self.EventWeight)
if maxZpt > v_pt_cut:
self.out.eecutflow.Fill(4.,self.EventWeight)
eecutflow_list.append(self.EventWeight)
                        if trigger_SingleEle is None:
if not trigger_SingleIsoEle and not trigger_SinglePhoton:
print "ZtoEE trigger inconsistency"
return False
else:
if not trigger_SingleEle and not trigger_SingleIsoEle and not trigger_SinglePhoton:
print "ZtoEE trigger inconsistency"
return False
#if not self.isMC and ("SinglePhoton" in self.sample[0] and (trigger_SingleEle or trigger_SingleIsoEle)):
# print "ZtoEE double counting"
# return False
self.out.eecutflow.Fill(5.,self.EventWeight)
eecutflow_list.append(self.EventWeight)
if self.isMC:
eltrig_tlv = el1_tlv
#for i in range(event.nTrigObj):
# if event.TrigObj_id[i] ==11:
# trigobj_v = ROOT.TVector3()
# trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])
# print "electron TrigObj_filterBits:",event.TrigObj_filterBits[i]
# if event.TrigObj_filterBits[i]==14336:
# #if event.TrigObj_filterBits[i]==1110000000000000:
# print "found matching electron"
# deltaR1 = trigobj_v.DeltaR(el1_v)
# deltaR2 = trigobj_v.DeltaR(el2_v)
# if deltaR2 < deltaR1 and deltaR2 < 0.2:
# eltrig_tlv = el2_tlv
# break
self.TriggerWeight = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta())
self.TriggerWeightUp = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) + self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())
self.TriggerWeightDown = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) - self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())
self.LeptonWeight = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())*self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())
IdIsoSF1 = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())
IdIsoSF2 = self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())
IdIsoSF1error = self.elSFs.getIdIsoSFerror(el1_tlv.Pt(), el1_tlv.Eta())
IdIsoSF2error = self.elSFs.getIdIsoSFerror(el2_tlv.Pt(),el2_tlv.Eta())
self.LeptonWeight = IdIsoSF1*IdIsoSF2
LeptonWeightsigma = np.sqrt((IdIsoSF1error*IdIsoSF2)**2+(IdIsoSF2error*IdIsoSF1)**2)
self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma
self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma
if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:
GenVpt = getGenVpt(event)
self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)
self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)
self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)
self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr
self.EventWeight *= self.TriggerWeight
self.EventWeight *= self.LeptonWeight
V = el1_tlv + el2_tlv
self.Ele1_pt = el1_tlv.Pt()
self.Ele1_eta = el1_tlv.Eta()
self.Ele1_phi = el1_tlv.Phi()
self.Ele1_mass = el1_tlv.M()
self.Ele2_pt = el2_tlv.Pt()
self.Ele2_eta = el2_tlv.Eta()
self.Ele2_phi = el2_tlv.Phi()
self.Ele2_mass = el2_tlv.M()
self.isZtoEE = True
########## ZtoMM #############
self.out.mmcutflow.Fill(0.,self.EventWeight)
mmcutflow_list.append(self.EventWeight)
maxZpt = -1.
Z_pt = -1.
Z_m = -1.
goodmuonpair = False
for i in idx_muons:
for j in idx_muons:
if i==j or event.Muon_charge[i] == event.Muon_charge[j]:
continue
mui_tlv = ROOT.TLorentzVector()
mui_tlv.SetPtEtaPhiM(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i],event.Muon_mass[i])
mui_v = ROOT.TVector3()
mui_v.SetPtEtaPhi(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i])
muj_tlv = ROOT.TLorentzVector()
muj_tlv.SetPtEtaPhiM(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j],event.Muon_mass[j])
muj_v = ROOT.TVector3()
muj_v.SetPtEtaPhi(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j])
dimu = mui_tlv + muj_tlv
Z_pt = dimu.Pt()
Z_m = dimu.M()
if Z_m > 70. and Z_m < 110. and Z_pt > maxZpt:
maxZpt = Z_pt
if mui_tlv.Pt() > muj_tlv.Pt():
mu1 = i
mu2 = j
mu1_tlv = mui_tlv
mu2_tlv = muj_tlv
mu1_v = mui_v
mu2_v = muj_v
else:
mu1 = j
mu2 = i
mu1_tlv = muj_tlv
mu2_tlv = mui_tlv
mu1_v = muj_v
mu2_v = mui_v
goodmuonpair = True
if goodmuonpair:
self.out.mmcutflow.Fill(1.,self.EventWeight)
mmcutflow_list.append(self.EventWeight)
mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]
mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0]
if mu1_tlv.Pt() > muon1_pt_cut and mu2_tlv.Pt() > muon2_pt_cut:
self.out.mmcutflow.Fill(2.,self.EventWeight)
mmcutflow_list.append(self.EventWeight)
if (mu1_highPtId >= 2 and mu2_highPtId >= 1) or (mu1_highPtId >= 1 and mu2_highPtId >= 2):
self.out.mmcutflow.Fill(3.,self.EventWeight)
mmcutflow_list.append(self.EventWeight)
if maxZpt > v_pt_cut:
self.out.mmcutflow.Fill(4.,self.EventWeight)
mmcutflow_list.append(self.EventWeight)
if not trigger_SingleMu:
print "ZtoMM trigger inconsistency"
return False
self.out.mmcutflow.Fill(5.,self.EventWeight)
mmcutflow_list.append(self.EventWeight)
if self.isMC:
if mu1_highPtId >=2:
mutrig_tlv = mu1_tlv
else:
mutrig_tlv = mu2_tlv
#for i in range(event.nTrigObj):
# if event.TrigObj_id[i] ==13:
# trigobj_v = ROOT.TVector3()
# trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])
# deltaR1 = trigobj_v.DeltaR(mu1_v)
# deltaR2 = trigobj_v.DeltaR(mu2_v)
# print "muon TrigObj_filterBits:",event.TrigObj_filterBits[i]
# if event.TrigObj_filterBits[i]==2048:
# #if event.TrigObj_filterBits[i]==10000000000:
# print "found matching muon"
# if deltaR2 < deltaR1 and deltaR2 < 0.2:
# mutrig_tlv = mu2_tlv
# break
self.TriggerWeight = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta())
self.TriggerWeightUp = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) + self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())
self.TriggerWeightDown = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) - self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())
IdSF1 = self.muSFs.getIdSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)
IdSF2 = self.muSFs.getIdSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)
IsoSF1 = self.muSFs.getIsoSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)
IsoSF2 = self.muSFs.getIsoSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)
IdSF1error = self.muSFs.getIdSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)
IdSF2error = self.muSFs.getIdSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)
IsoSF1error = self.muSFs.getIsoSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)
IsoSF2error = self.muSFs.getIsoSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)
self.LeptonWeight = IdSF1*IdSF2*IsoSF1*IsoSF2
LeptonWeightsigma = np.sqrt((IdSF1error*IdSF2*IsoSF1*IsoSF2)**2+(IdSF2error*IdSF1*IsoSF1*IsoSF2)**2+(IsoSF1error*IdSF1*IdSF2*IsoSF2)**2+(IsoSF2error*IdSF1*IdSF2*IsoSF1)**2)
self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma
self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma
if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:
GenVpt = getGenVpt(event)
self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)
self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)
self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)
self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr
self.EventWeight *= self.TriggerWeight
self.EventWeight *= self.LeptonWeight
if mu1_tlv.DeltaR(mu2_tlv) < 0.3:
try:
self.Mu1_relIso = ((event.Muon_tkRelIso[mu1]*mu1_tlv.Pt()) - mu2_tlv.Pt())/mu1_tlv.Pt()
self.Mu2_relIso = ((event.Muon_tkRelIso[mu2]*mu2_tlv.Pt()) - mu1_tlv.Pt())/mu2_tlv.Pt()
except:
self.Mu1_relIso = -1.
self.Mu2_relIso = -1.
else:
try:
self.Mu1_relIso = event.Muon_tkRelIso[mu1]
self.Mu2_relIso = event.Muon_tkRelIso[mu2]
except:
self.Mu1_relIso = -1.
self.Mu2_relIso = -1.
V = mu1_tlv + mu2_tlv
self.Mu1_pt = mu1_tlv.Pt()
self.Mu1_eta = mu1_tlv.Eta()
self.Mu1_phi = mu1_tlv.Phi()
self.Mu1_mass = mu1_tlv.M()
self.Mu1_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu1])[0]
self.Mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]
self.Mu2_pt = mu2_tlv.Pt()
self.Mu2_eta = mu2_tlv.Eta()
self.Mu2_phi = mu2_tlv.Phi()
self.Mu2_mass = mu2_tlv.M()
self.Mu2_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu2])[0]
self.Mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0]
self.isZtoMM = True
########### TtoEM #########
if not self.isZtoMM and not self.isZtoEE and self.nElectrons == 1 and self.nMuons == 1:
if event.Electron_charge[idx_loose_electrons[0]] != event.Muon_charge[idx_loose_muons[0]]:
el_tlv = loose_electrons_tlv_list[0]
mu_tlv = loose_muons_tlv_list[0]
if mu_tlv.Pt() > 30. and el_tlv.Pt() > 30.:
V = mu_tlv + el_tlv
if V.Pt() > 50.:
                        if trigger_SingleEle is None:
if not trigger_SingleIsoEle:
print "TtoEM trigger inconsistency"
return False
else:
if not trigger_SingleEle and not trigger_SingleIsoEle:
print "TtoEM trigger inconsistency"
return False
if self.isMC:
self.TriggerWeight = self.elSFs.getTriggerSF(el_tlv.Pt(),el_tlv.Eta())
self.LeptonWeight = self.elSFs.getIdIsoSF(el_tlv.Pt(), el_tlv.Eta())
if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:
GenVpt = getGenVpt(event)
self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)
self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)
self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)
self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr
self.EventWeight *= self.TriggerWeight
self.EventWeight *= self.LeptonWeight
self.Mu1_pt = mu_tlv.Pt()
self.Mu1_eta = mu_tlv.Eta()
self.Mu1_phi = mu_tlv.Phi()
self.Mu1_mass = mu_tlv.M()
self.Ele1_pt = el_tlv.Pt()
self.Ele1_eta = el_tlv.Eta()
self.Ele1_phi = el_tlv.Phi()
self.Ele1_mass = el_tlv.M()
self.isTtoEM = True
######### ZtoNN ##########
self.out.nncutflow.Fill(0.,self.EventWeight)
nncutflow_list.append(self.EventWeight)
if not self.isZtoMM and not self.isZtoEE and not self.isTtoEM:
            if event.PuppiMET_pt > met_pt_cut:
self.out.nncutflow.Fill(1.,self.EventWeight)
nncutflow_list.append(self.EventWeight)
if self.nElectrons == 0 and self.nMuons == 0 and self.nTaus == 0:
self.out.nncutflow.Fill(2.,self.EventWeight)
nncutflow_list.append(self.EventWeight)
V.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi,event.PuppiMET_pt)
V_chs.SetPtEtaPhiE(event.MET_pt,0.,event.MET_phi,event.MET_pt)
                    if trigger_MET is None:
if not self.isMC and not trigger_METMHT and not trigger_METMHTNoMu:
print "ZtoNN Trigger inconsistency"
return False
else:
if not self.isMC and not trigger_MET and not trigger_METMHT and not trigger_METMHTNoMu:
print "ZtoNN Trigger inconsistency"
return False
self.out.nncutflow.Fill(3.,self.EventWeight)
nncutflow_list.append(self.EventWeight)
                    if not self.filter(event):
print "Bad event"
return False
self.out.nncutflow.Fill(4.,self.EventWeight)
nncutflow_list.append(self.EventWeight)
if self.isMC:
if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:
GenVpt = getGenVpt(event)
self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)
self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)
self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)
self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr
self.TriggerWeight = 1.
self.isZtoNN = True
#stop if no semileptonic decays
        if not (self.isZtoEE or self.isZtoMM or self.isZtoNN or self.isTtoEM):
return False
########## setting the Higgs and V index #######
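        # Higgs candidate: the highest-pT AK8 jet, required to be separated by
        # dR > 0.8 from both selected leptons in the Z->ll channels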
fatjet_idx_H = 0
valid_Higgs = False
if self.isZtoMM:
fatjet_maxpt = 0.
for i,fatjet_tlv in enumerate(fatjet_tlv_list):
if fatjet_tlv.DeltaR(mu1_tlv)>0.8 and fatjet_tlv.DeltaR(mu2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:
fatjet_maxpt=fatjet_tlv.Pt()
fatjet_idx_H = i
valid_Higgs = True
if not valid_Higgs:
return False
elif self.isZtoEE:
fatjet_maxpt = 0.
for i,fatjet_tlv in enumerate(fatjet_tlv_list):
if fatjet_tlv.DeltaR(el1_tlv)>0.8 and fatjet_tlv.DeltaR(el2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:
fatjet_maxpt=fatjet_tlv.Pt()
fatjet_idx_H = i
valid_Higgs = True
if not valid_Higgs:
return False
elif self.isZtoNN:
fatjet_maxpt = 0.
for i,fatjet_tlv in enumerate(fatjet_tlv_list):
if fatjet_tlv.Pt()>fatjet_maxpt:
fatjet_maxpt=fatjet_tlv.Pt()
fatjet_idx_H = i
############ AK4 Jet ###########
for ijet in range(event.nJet):
jet_pt = event.Jet_pt[ijet]
jet_eta = event.Jet_eta[ijet]
jet_phi = event.Jet_phi[ijet]
jet_mass = event.Jet_mass[ijet]
jet_tlv = ROOT.TLorentzVector()
jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)
self.HT += jet_pt
if jet_eta > -2.5 and jet_eta < -1.479 and jet_phi > -1.55 and jet_phi < -0.9:
if self.HT_HEM15_16 == -1.:
self.HT_HEM15_16 = 0.
self.HT_HEM15_16 += jet_pt
if jet_pt > ak4_pt_cut and abs(jet_eta) < ak4_eta_cut:
cleanJet = True
for loose_electrons_tlv in loose_electrons_tlv_list:
if loose_electrons_tlv.DeltaR(jet_tlv) < 0.4:
cleanJet = False
for loose_muons_tlv in loose_muons_tlv_list:
if loose_muons_tlv.DeltaR(jet_tlv) < 0.4:
cleanJet = False
if cleanJet and getJetID(self.year,event,ijet):
if len(fatjet_tlv_list) > 0 and fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:
jet_tlv_list.append(jet_tlv)
idx_jet.append(ijet)
############ AK4 Jet check for VBF ###########
if self.isZtoMM:
lep1_tlv = mu1_tlv
lep2_tlv = mu2_tlv
if self.isZtoEE:
lep1_tlv = el1_tlv
lep2_tlv = el2_tlv
for ijet in range(event.nJet):
jet_pt = event.Jet_pt[ijet]
jet_eta = event.Jet_eta[ijet]
jet_phi = event.Jet_phi[ijet]
jet_mass = event.Jet_mass[ijet]
jet_tlv = ROOT.TLorentzVector()
jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)
if abs(jet_eta) < 5.0:
if len(fatjet_tlv_list) > 0:
if fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:
if getJetID(self.year,event,ijet) and event.Jet_puId[ijet]==7:
if self.isZtoMM or self.isZtoEE:
if jet_tlv.DeltaR(lep1_tlv)>0.4 and jet_tlv.DeltaR(lep2_tlv)>0.4:
jet_tlv_list_vbf.append(jet_tlv)
idx_jet_vbf.append(ijet)
elif self.isZtoNN:
jet_tlv_list_vbf.append(jet_tlv)
idx_jet_vbf.append(ijet)
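        # VBF tag: among forward jet pairs in opposite hemispheres with |deta| > 4,
        # keep the pair maximizing the dijet invariant mass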
idx1_vbf = -1
idx2_vbf = -1
maxVBFmass = -1.
for ijet1, jet1_tlv in enumerate(jet_tlv_list_vbf):
for ijet2, jet2_tlv in enumerate(jet_tlv_list_vbf):
if ijet1 == ijet2: continue
eta1 = jet_tlv_list_vbf[ijet1].Eta()
eta2 = jet_tlv_list_vbf[ijet2].Eta()
V_VBF = jet_tlv_list_vbf[ijet1]+jet_tlv_list_vbf[ijet2]
VBFmass = V_VBF.M()
if abs(eta1-eta2)>4.0 and eta1*eta2<0. and VBFmass>maxVBFmass:
idx1_vbf = ijet1
idx2_vbf = ijet2
maxVBFmass = VBFmass
self.dijet_VBF_mass = maxVBFmass
if maxVBFmass > 500.:
self.isVBF = True
self.Jet1_VBF_pt = jet_tlv_list_vbf[idx1_vbf].Pt()
self.Jet1_VBF_eta = jet_tlv_list_vbf[idx1_vbf].Eta()
self.Jet1_VBF_phi = jet_tlv_list_vbf[idx1_vbf].Phi()
self.Jet1_VBF_mass = jet_tlv_list_vbf[idx1_vbf].M()
self.Jet2_VBF_pt = jet_tlv_list_vbf[idx2_vbf].Pt()
self.Jet2_VBF_eta = jet_tlv_list_vbf[idx2_vbf].Eta()
self.Jet2_VBF_phi = jet_tlv_list_vbf[idx2_vbf].Phi()
self.Jet2_VBF_mass = jet_tlv_list_vbf[idx2_vbf].M()
self.deltaR_VBF = jet_tlv_list_vbf[idx1_vbf].DeltaR(jet_tlv_list_vbf[idx2_vbf])
self.deltaR_HVBFjet1 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx1_vbf]))
self.deltaR_HVBFjet2 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx2_vbf]))
########## Higgs ########
H = fatjet_tlv_list[fatjet_idx_H]
if self.runJEC:
self.H_mass_nom = event.FatJet_msoftdrop_nom[fatjet_idx_H]
self.H_mass_jmsUp = event.FatJet_msoftdrop_jmsUp[fatjet_idx_H]
self.H_mass_jmsDown = event.FatJet_msoftdrop_jmsDown[fatjet_idx_H]
self.H_mass_jmrUp = event.FatJet_msoftdrop_jmrUp[fatjet_idx_H]
self.H_mass_jmrDown = event.FatJet_msoftdrop_jmrDown[fatjet_idx_H]
self.H_pt_nom = event.FatJet_pt_nom[fatjet_idx_H]
self.H_pt_jesUp = event.FatJet_pt_jesTotalUp[fatjet_idx_H]
self.H_pt_jesDown = event.FatJet_pt_jesTotalDown[fatjet_idx_H]
self.H_pt_jerUp = event.FatJet_pt_jerUp[fatjet_idx_H]
self.H_pt_jerDown = event.FatJet_pt_jerDown[fatjet_idx_H]
self.PuppiMET_pt_nom = event.PuppiMET_pt_nom
self.PuppiMET_pt_jesUp = event.PuppiMET_pt_jesTotalUp
self.PuppiMET_pt_jesDown = event.PuppiMET_pt_jesTotalDown
self.PuppiMET_pt_jerUp = event.PuppiMET_pt_jerUp
self.PuppiMET_pt_jerDown = event.PuppiMET_pt_jerDown
H_Eta = H.Eta()
H_Phi = H.Phi()
H_M = H.M()
H_nom = ROOT.TLorentzVector()
H_jesUp = ROOT.TLorentzVector()
H_jesDown = ROOT.TLorentzVector()
H_jerUp = ROOT.TLorentzVector()
H_jerDown = ROOT.TLorentzVector()
H_nom.SetPtEtaPhiM(self.H_pt_nom,H_Eta,H_Phi,H_M)
H_jesUp.SetPtEtaPhiM(self.H_pt_jesUp,H_Eta,H_Phi,H_M)
H_jesDown.SetPtEtaPhiM(self.H_pt_jesDown,H_Eta,H_Phi,H_M)
H_jerUp.SetPtEtaPhiM(self.H_pt_jerUp,H_Eta,H_Phi,H_M)
H_jerDown.SetPtEtaPhiM(self.H_pt_jerDown,H_Eta,H_Phi,H_M)
MET_nom = ROOT.TLorentzVector()
MET_jesUp = ROOT.TLorentzVector()
MET_jesDown = ROOT.TLorentzVector()
MET_jerUp = ROOT.TLorentzVector()
MET_jerDown = ROOT.TLorentzVector()
MET_nom.SetPtEtaPhiM(self.PuppiMET_pt_nom,0.,event.PuppiMET_phi,self.PuppiMET_pt_nom)
MET_jesUp.SetPtEtaPhiM(self.PuppiMET_pt_jesUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesUp)
MET_jesDown.SetPtEtaPhiM(self.PuppiMET_pt_jesDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesDown)
MET_jerUp.SetPtEtaPhiM(self.PuppiMET_pt_jerUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerUp)
MET_jerDown.SetPtEtaPhiM(self.PuppiMET_pt_jerDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerDown)
for ifatjet in idx_fatjet:
if event.FatJet_btagHbb[ifatjet] > 0.3:
self.isBoosted4B = True
self.nJetsNoFatJet = len(jet_tlv_list)
if self.isZtoNN:
self.DPhi = abs(MET_tlv.DeltaPhi(H))
else:
self.DPhi = abs(V.DeltaPhi(H))
self.VH_deltaR = H.DeltaR(V)
jet_list_temp = []
for ijet in range(event.nJet):
jet_pt = event.Jet_pt[ijet]
jet_eta = event.Jet_eta[ijet]
jet_phi = event.Jet_phi[ijet]
jet_mass = event.Jet_mass[ijet]
jet_tlv = ROOT.TLorentzVector()
jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)
if jet_tlv.DeltaR(H) < 0.8:
jet_list_temp.append(ijet)
if len(jet_list_temp) == 1:
idx = jet_list_temp[0]
self.H_chf = event.Jet_chHEF[idx]
self.H_nhf = event.Jet_neHEF[idx]
elif len(jet_list_temp) == 2:
idx1 = jet_list_temp[0]
idx2 = jet_list_temp[1]
pt1 = event.Jet_pt[idx1]
pt2 = event.Jet_pt[idx2]
chf1 = event.Jet_chHEF[idx1]
chf2 = event.Jet_chHEF[idx2]
nhf1 = event.Jet_neHEF[idx1]
nhf2 = event.Jet_neHEF[idx2]
self.H_chf = (chf1*pt1+chf2*pt2)/(pt1+pt2)
self.H_nhf = (nhf1*pt1+nhf2*pt2)/(pt1+pt2)
elif len(jet_list_temp) == 3:
idx1 = jet_list_temp[0]
idx2 = jet_list_temp[1]
idx3 = jet_list_temp[2]
pt1 = event.Jet_pt[idx1]
pt2 = event.Jet_pt[idx2]
pt3 = event.Jet_pt[idx3]
chf1 = event.Jet_chHEF[idx1]
chf2 = event.Jet_chHEF[idx2]
chf3 = event.Jet_chHEF[idx3]
nhf1 = event.Jet_neHEF[idx1]
nhf2 = event.Jet_neHEF[idx2]
nhf3 = event.Jet_neHEF[idx3]
self.H_chf = (chf1*pt1+chf2*pt2+chf3*pt3)/(pt1+pt2+pt3)
self.H_nhf = (nhf1*pt1+nhf2*pt2+nhf3*pt3)/(pt1+pt2+pt3)
for jet_tlv in jet_tlv_list:
if abs(MET_tlv.DeltaPhi(jet_tlv)) < self.MinJetMetDPhi:
self.MinJetMetDPhi = abs(MET_tlv.DeltaPhi(jet_tlv))
for ijet in idx_jet:
if event.Jet_btagDeepB[ijet] > self.MaxJetNoFatJetBTag:
self.MaxJetNoFatJetBTag = event.Jet_btagDeepB[ijet]
if not self.isData:
for igenjet in range(event.nGenJetAK8):
genjetAK8_tlv = ROOT.TLorentzVector()
genjetAK8_tlv.SetPtEtaPhiM(event.GenJetAK8_pt[igenjet], event.GenJetAK8_eta[igenjet], event.GenJetAK8_phi[igenjet], event.GenJetAK8_mass[igenjet])
if H.DeltaR(genjetAK8_tlv) < 0.8:
self.H_hadronflavour = struct.unpack('B',event.GenJetAK8_hadronFlavour[igenjet])[0]
self.H_partonflavour = event.GenJetAK8_partonFlavour[igenjet]
self.btagToolAK4_deep.fillEfficiencies(event,idx_jet,fatjet_idx_H)
self.BTagAK4Weight_deep = self.btagToolAK4_deep.getWeight(event,idx_jet,fatjet_idx_H)
self.BTagAK4Weight_deep_up = self.btagToolAK4_deep_up.getWeight(event,idx_jet,fatjet_idx_H)
self.BTagAK4Weight_deep_down = self.btagToolAK4_deep_down.getWeight(event,idx_jet,fatjet_idx_H)
#search for AK4 jets which match with the subjets from the H
ak4_subjets = []
subjet1 = TLorentzVector()
subjet2 = TLorentzVector()
subjet1_idx = event.FatJet_subJetIdx1[fatjet_idx_H]
subjet2_idx = event.FatJet_subJetIdx2[fatjet_idx_H]
if subjet1_idx>=0. and subjet2_idx>=0.:
subjet1.SetPtEtaPhiM(event.SubJet_pt[subjet1_idx],event.SubJet_eta[subjet1_idx],event.SubJet_phi[subjet1_idx],event.SubJet_mass[subjet1_idx])
subjet2.SetPtEtaPhiM(event.SubJet_pt[subjet2_idx],event.SubJet_eta[subjet2_idx],event.SubJet_phi[subjet2_idx],event.SubJet_mass[subjet2_idx])
for jetid in range(event.nJet):
ak4jet = TLorentzVector()
ak4jet.SetPtEtaPhiM(event.Jet_pt[jetid],event.Jet_eta[jetid],event.Jet_phi[jetid],event.Jet_mass[jetid])
if ak4jet.DeltaR(subjet1)<0.4:
ak4_subjets.append(jetid)
if ak4jet.DeltaR(subjet2)<0.4:
ak4_subjets.append(jetid)
self.btagToolAK8_deep.fillEfficiencies(event,ak4_subjets,fatjet_idx_H)
self.BTagAK8Weight_deep = self.btagToolAK8_deep.getWeight(event,ak4_subjets,fatjet_idx_H)
self.BTagAK8Weight_deep_up = self.btagToolAK8_deep_up.getWeight(event,ak4_subjets,fatjet_idx_H)
self.BTagAK8Weight_deep_down = self.btagToolAK8_deep_down.getWeight(event,ak4_subjets,fatjet_idx_H)
########### X and variables ############
X = V + H
if self.isZtoNN:
X_chs = V_chs + H
self.X_mass_chs = X_chs.M()
if self.runJEC:
X_nom = V + H_nom
X_jesUp = V + H_jesUp
X_jesDown = V + H_jesDown
X_jerUp = V + H_jerUp
X_jerDown = V + H_jerDown
X_MET_nom = MET_nom + H_nom
X_MET_jesUp = MET_jesUp + H_jesUp
X_MET_jesDown = MET_jesDown + H_jesDown
X_MET_jerUp = MET_jerUp + H_jerUp
X_MET_jerDown = MET_jerDown + H_jerDown
self.X_mass_nom = X_nom.M()
self.X_mass_jesUp = X_jesUp.M()
self.X_mass_jesDown = X_jesDown.M()
self.X_mass_jerUp = X_jerUp.M()
self.X_mass_jerDown = X_jerDown.M()
self.X_mass_MET_nom = X_MET_nom.M()
self.X_mass_MET_jesUp = X_MET_jesUp.M()
self.X_mass_MET_jesDown = X_MET_jesDown.M()
self.X_mass_MET_jerUp = X_MET_jerUp.M()
self.X_mass_MET_jerDown = X_MET_jerDown.M()
self.V_pt = V.Pt()
self.V_eta = V.Eta()
self.V_phi = V.Phi()
self.V_mass = V.M()
if self.isZtoNN:
self.V_mass = 0.
self.H_pt = H.Pt()
self.H_eta = H.Eta()
self.H_phi = H.Phi()
self.H_M = H.M()
self.H_mass = event.FatJet_msoftdrop[fatjet_idx_H]
self.X_pt = X.Pt()
self.X_eta = X.Eta()
self.X_phi = X.Phi()
self.X_mass = X.M()
self.H_dbt = event.FatJet_btagHbb[fatjet_idx_H]
self.BtagDeepB = event.FatJet_btagDeepB[fatjet_idx_H]
self.DeepTagMD_H4qvsQCD = event.FatJet_deepTagMD_H4qvsQCD[fatjet_idx_H]
self.DeepTagMD_HbbvsQCD = event.FatJet_deepTagMD_HbbvsQCD[fatjet_idx_H]
self.DeepTagMD_ZHbbvsQCD = event.FatJet_deepTagMD_ZHbbvsQCD[fatjet_idx_H]
self.DeepTagMD_ZbbvsQCD = event.FatJet_deepTagMD_ZbbvsQCD[fatjet_idx_H]
self.DeepTagMD_bbvsLight = event.FatJet_deepTagMD_bbvsLight[fatjet_idx_H]
self.DeepTagMD_WvsQCD = event.FatJet_deepTagMD_WvsQCD[fatjet_idx_H]
self.DeepTagMD_ZvsQCD = event.FatJet_deepTagMD_ZvsQCD[fatjet_idx_H]
self.H_tau21 = fatjet_tau21_list[fatjet_idx_H]
self.H_tau41 = fatjet_tau41_list[fatjet_idx_H]
self.H_tau42 = fatjet_tau42_list[fatjet_idx_H]
self.H_tau31 = fatjet_tau31_list[fatjet_idx_H]
self.H_tau32 = fatjet_tau32_list[fatjet_idx_H]
self.VHDEta = abs(V.Eta() - H.Eta())
if event.FatJet_subJetIdx1[fatjet_idx_H] >= 0:
Hcsv1 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx1[fatjet_idx_H]]
Hdeepcsv1 = event.SubJet_btagDeepB[event.FatJet_subJetIdx1[fatjet_idx_H]]
else:
Hcsv1 = -1.
Hdeepcsv1 = -1.
if event.FatJet_subJetIdx2[fatjet_idx_H] >= 0:
Hcsv2 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx2[fatjet_idx_H]]
Hdeepcsv2 = event.SubJet_btagDeepB[event.FatJet_subJetIdx2[fatjet_idx_H]]
else:
Hcsv2 = -1.
Hdeepcsv2 = -1.
self.H_csv1 = max(Hcsv1,Hcsv2)
self.H_csv2 = min(Hcsv1,Hcsv2)
self.H_deepcsv1 = max(Hdeepcsv1,Hdeepcsv2)
self.H_deepcsv2 = min(Hdeepcsv1,Hdeepcsv2)
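        # DeepCSV b-tagging working points (loose/medium/tight) for each data-taking year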
if self.year == 2016:
wp_loose = 0.2217
wp_medium = 0.6321
wp_tight = 0.8953
elif self.year == 2017:
wp_loose = 0.1522
wp_medium = 0.4941
wp_tight = 0.8001
elif self.year == 2018:
wp_loose = 0.1241
wp_medium = 0.4184
wp_tight = 0.7527
if self.H_deepcsv2 > wp_loose:
self.isHtobb = True
if self.H_deepcsv1 > wp_medium and self.H_deepcsv2 > wp_loose:
self.isHtobb_ml = True
if self.MaxJetNoFatJetBTag > wp_loose:
self.isMaxBTag_loose = True
if self.MaxJetNoFatJetBTag > wp_medium:
self.isMaxBTag_medium = True
if self.MaxJetNoFatJetBTag > wp_tight:
self.isMaxBTag_tight = True
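        # Mass-decorrelated tau21 (DDT): tau21' = tau21 + M * log(m^2/pT), with slope M ~ 0.082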
if self.H_mass != 0.:
            self.H_ddt = self.H_tau21 + 0.082 * np.log(self.H_mass * self.H_mass / self.H_pt)
else:
self.H_ddt = -1.
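        # Transverse mass of the V+H system: mT = sqrt(2 * pT(V) * pT(H) * (1 - cos(dphi(V,H))))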
self.X_tmass = np.sqrt(2.*V.Pt()*fatjet_tlv_list[fatjet_idx_H].Pt()*(1.-np.cos(fatjet_tlv_list[fatjet_idx_H].DeltaPhi(V))))
if self.isZtoNN:
self.X_mass = self.X_tmass
else:
self.X_mass = X.M()
if self.X_mass > 750 and self.VH_deltaR > 2:
if self.MinJetMetDPhi>0.5 and self.DPhi>2:
for i,weight in enumerate(nncutflow_list):
self.out.nncutflow_inc.Fill(i,weight)
if self.VHDEta<1.3:
for i,weight in enumerate(eecutflow_list):
self.out.eecutflow_inc.Fill(i,weight)
for i,weight in enumerate(mmcutflow_list):
self.out.mmcutflow_inc.Fill(i,weight)
if self.isZtoEE or self.isZtoMM or self.isZtoNN or self.isTtoEM:
self.fillBranches(event)
return True
|
normal
|
{
"blob_id": "1721bba2cae1e330bffeb9df05341df9522ff885",
"index": 4394,
"step-1": "import ROOT\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection \nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\n\nfrom TreeProducer import *\nfrom TreeProducerCommon import *\nfrom CorrectionTools.PileupWeightTool import *\nfrom CorrectionTools.BTaggingTool import BTagWeightTool, BTagWPs\nfrom CorrectionTools.MuonSFs import *\nfrom CorrectionTools.ElectronSFs import *\nfrom CorrectionTools.RecoilCorrectionTool import getTTptWeight, getTTPt\nfrom CorrectionTools.DYCorrection import *\nimport struct\nimport numpy as np\n\nclass LLProducer(Module):\n\n def __init__(self, name, DataType, filelist, **kwargs):\n \n self.name = name\n self.out = TreeProducer(name)\n self.sample = filelist\n\n if DataType=='data':\n self.isData = True\n self.isMC = False\n else:\n self.isData = False\n self.isMC = True\n self.year = kwargs.get('year', 2017 )\n self.tes = kwargs.get('tes', 1.0 )\n self.ltf = kwargs.get('ltf', 1.0 )\n self.jtf = kwargs.get('jtf', 1.0 )\n year = self.year\n self.filter = getMETFilters(year,self.isData)\n if not self.isData:\n self.muSFs = MuonSFs(year=year)\n self.elSFs = ElectronSFs(year=year)\n self.puTool = PileupWeightTool(year =year)\n self.btagToolAK8_deep = BTagWeightTool('DeepCSV','AK8','loose',sigma='central',channel='ll',year=year)\n self.btagToolAK8_deep_up = BTagWeightTool('DeepCSV','AK8','loose',sigma='up',channel='ll',year=year)\n self.btagToolAK8_deep_down = BTagWeightTool('DeepCSV','AK8','loose',sigma='down',channel='ll',year=year)\n self.btagToolAK4_deep = BTagWeightTool('DeepCSV','AK4','loose',sigma='central',channel='ll',year=year)\n self.btagToolAK4_deep_up = BTagWeightTool('DeepCSV','AK4','loose',sigma='up',channel='ll',year=year)\n self.btagToolAK4_deep_down = BTagWeightTool('DeepCSV','AK4','loose',sigma='down',channel='ll',year=year)\n if 'DYJetsToLL' in self.sample[0]:\n self.DYCorr = DYCorrection('DYJetsToLL')\n elif 'ZJetsToNuNu' in self.sample[0]:\n self.DYCorr = DYCorrection('ZJetsToNuNu')\n elif 'WJetsToLNu' in self.sample[0]:\n self.DYCorr = DYCorrection('WJetsToLNu')\n self.runJEC = False\n JEC_samples = ['Zprime','WWTo','WZTo','ZZTo','GluGluHToBB','ZH_HToBB','Wplus','Wminus']\n for JEC_sample in JEC_samples:\n if self.sample[0].find(JEC_sample)>0:\n self.runJEC = True \n def beginJob(self):\n pass\n\n def endJob(self):\n if not self.isData:\n self.btagToolAK8_deep.setDirectory(self.out.outputfile,'AK8btag_deep')\n self.btagToolAK4_deep.setDirectory(self.out.outputfile,'AK4btag_deep')\n self.out.outputfile.Write()\n self.out.outputfile.Close()\n\n def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n pass\n\n\n def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree): \n pass\n \n def fillBranches(self,event):\n self.out.isMC[0] = self.isMC\n self.out.is2016[0] = self.is2016\n self.out.is2017[0] = self.is2017\n self.out.is2018[0] = self.is2018\n self.out.EventNumber[0] = event.event\n self.out.LumiNumber[0] = event.luminosityBlock\n self.out.RunNumber[0] = event.run\n self.out.EventWeight[0] = self.EventWeight\n self.out.TopWeight[0] = self.TopWeight\n self.out.BTagAK8Weight[0] = self.BTagAK8Weight\n self.out.BTagAK4Weight[0] = self.BTagAK4Weight\n self.out.BTagAK8Weight_deep[0] = self.BTagAK8Weight_deep\n self.out.BTagAK8Weight_deep_up[0] = self.BTagAK8Weight_deep_up\n self.out.BTagAK8Weight_deep_down[0] = self.BTagAK8Weight_deep_down\n self.out.BTagAK4Weight_deep[0] = self.BTagAK4Weight_deep\n self.out.BTagAK4Weight_deep_up[0] = 
self.BTagAK4Weight_deep_up\n self.out.BTagAK4Weight_deep_down[0] = self.BTagAK4Weight_deep_down\n self.out.BBTagWeight[0] = self.BBTagWeight\n self.out.GenWeight[0] = self.GenWeight\n self.out.PUWeight[0] = self.PUWeight\n self.out.LeptonWeight[0] = self.LeptonWeight\n self.out.LeptonWeightUp[0] = self.LeptonWeightUp\n self.out.LeptonWeightDown[0] = self.LeptonWeightDown\n self.out.TriggerWeight[0] = self.TriggerWeight\n self.out.TriggerWeightUp[0] = self.TriggerWeightUp\n self.out.TriggerWeightDown[0] = self.TriggerWeightDown\n self.out.QCDNLO_Corr[0] = self.QCDNLO_Corr\n self.out.QCDNNLO_Corr[0] = self.QCDNNLO_Corr\n self.out.EWKNLO_Corr[0] = self.EWKNLO_Corr\n self.out.isZtoNN[0] = self.isZtoNN\n self.out.isZtoEE[0] = self.isZtoEE\n self.out.isZtoMM[0] = self.isZtoMM\n self.out.isTtoEM[0] = self.isTtoEM\n self.out.isBoosted4B[0] = self.isBoosted4B\n self.out.isHtobb[0] = self.isHtobb\n self.out.isHtobb_ml[0] = self.isHtobb_ml\n self.out.isMaxBTag_loose[0] = self.isMaxBTag_loose\n self.out.isMaxBTag_medium[0] = self.isMaxBTag_medium\n self.out.isMaxBTag_tight[0] = self.isMaxBTag_tight\n self.out.isVBF[0] = self.isVBF\n self.out.nPV[0] = event.PV_npvsGood\n self.out.nTaus[0] = self.nTaus\n self.out.nElectrons[0] = self.nElectrons\n self.out.nMuons[0] = self.nMuons\n self.out.nJets[0] = self.nJetsNoFatJet\n self.out.nFatJets[0] = self.nFatJets\n self.out.DPhi[0] = self.DPhi\n self.out.DEta[0] = self.VHDEta\n self.out.MinDPhi[0] = self.MinJetMetDPhi\n self.out.MaxBTag[0] = self.MaxJetNoFatJetBTag\n self.out.BtagDeepB[0] = self.BtagDeepB\n self.out.DeepTagMD_H4qvsQCD[0] = self.DeepTagMD_H4qvsQCD\n self.out.DeepTagMD_HbbvsQCD[0] = self.DeepTagMD_HbbvsQCD\n self.out.DeepTagMD_ZHbbvsQCD[0] = self.DeepTagMD_ZHbbvsQCD\n self.out.DeepTagMD_ZbbvsQCD[0] = self.DeepTagMD_ZbbvsQCD\n self.out.DeepTagMD_bbvsLight[0] = self.DeepTagMD_bbvsLight\n self.out.DeepTagMD_WvsQCD[0] = self.DeepTagMD_WvsQCD\n self.out.DeepTagMD_ZvsQCD[0] = self.DeepTagMD_ZvsQCD\n self.out.Mu1_pt[0] = self.Mu1_pt\n self.out.Mu1_eta[0] = self.Mu1_eta\n self.out.Mu1_phi[0] = self.Mu1_phi\n self.out.Mu1_mass[0] = self.Mu1_mass\n self.out.Mu1_pfIsoId[0] = self.Mu1_pfIsoId\n self.out.Mu1_relIso[0] = self.Mu1_relIso\n self.out.Mu1_highPtId[0] = self.Mu1_highPtId\n self.out.Mu2_pt[0] = self.Mu2_pt\n self.out.Mu2_eta[0] = self.Mu2_eta\n self.out.Mu2_phi[0] = self.Mu2_phi\n self.out.Mu2_mass[0] = self.Mu2_mass\n self.out.Mu2_pfIsoId[0] = self.Mu2_pfIsoId\n self.out.Mu2_relIso[0] = self.Mu2_relIso\n self.out.Mu2_highPtId[0] = self.Mu2_highPtId\n self.out.Ele1_pt[0] = self.Ele1_pt\n self.out.Ele1_eta[0] = self.Ele1_eta\n self.out.Ele1_phi[0] = self.Ele1_phi\n self.out.Ele1_mass[0] = self.Ele1_mass\n self.out.Ele2_pt[0] = self.Ele2_pt\n self.out.Ele2_eta[0] = self.Ele2_eta\n self.out.Ele2_phi[0] = self.Ele2_phi\n self.out.Ele2_mass[0] = self.Ele2_mass\n self.out.Ele_HEM15_16[0] = self.Ele_HEM15_16\n self.out.Jet1_VBF_pt[0] = self.Jet1_VBF_pt\n self.out.Jet1_VBF_eta[0] = self.Jet1_VBF_eta\n self.out.Jet1_VBF_phi[0] = self.Jet1_VBF_phi\n self.out.Jet1_VBF_mass[0] = self.Jet1_VBF_mass\n self.out.Jet2_VBF_pt[0] = self.Jet2_VBF_pt\n self.out.Jet2_VBF_eta[0] = self.Jet2_VBF_eta\n self.out.Jet2_VBF_phi[0] = self.Jet2_VBF_phi\n self.out.Jet2_VBF_mass[0] = self.Jet2_VBF_mass\n self.out.dijet_VBF_mass[0] = self.dijet_VBF_mass \n self.out.deltaR_VBF[0] = self.deltaR_VBF \n self.out.deltaR_HVBFjet1[0] = self.deltaR_HVBFjet1\n self.out.deltaR_HVBFjet2[0] = self.deltaR_HVBFjet2\n self.out.MET[0] = event.PuppiMET_pt\n self.out.MET_chs[0] = event.MET_pt\n 
self.out.HT_HEM15_16[0] = self.HT_HEM15_16\n self.out.LHEScaleWeight = self.LHEScaleWeight\n self.out.LHEPdfWeight = self.LHEPdfWeight\n self.out.LHEWeight_originalXWGTUP[0]= self.LHEWeight_originalXWGTUP\n self.out.PrefireWeight[0] = self.PrefireWeight\n self.out.PrefireWeightUp[0] = self.PrefireWeightUp\n self.out.PrefireWeightDown[0] = self.PrefireWeightDown\n self.out.HT[0] = self.HT\n self.out.H_pt[0] = self.H_pt\n self.out.H_eta[0] = self.H_eta\n self.out.H_phi[0] = self.H_phi\n self.out.H_mass[0] = self.H_mass\n self.out.H_M[0] = self.H_M\n self.out.H_tau21[0] = self.H_tau21\n self.out.H_tau41[0] = self.H_tau41\n self.out.H_tau42[0] = self.H_tau42\n self.out.H_tau31[0] = self.H_tau31\n self.out.H_tau32[0] = self.H_tau32\n self.out.H_ddt[0] = self.H_ddt\n self.out.H_csv1[0] = self.H_csv1\n self.out.H_csv2[0] = self.H_csv2\n self.out.H_deepcsv1[0] = self.H_deepcsv1\n self.out.H_deepcsv2[0] = self.H_deepcsv2\n self.out.H_dbt[0] = self.H_dbt\n self.out.H_hadronflavour[0] = self.H_hadronflavour\n self.out.H_partonflavour[0] = self.H_partonflavour\n self.out.H_chf[0] = self.H_chf\n self.out.H_nhf[0] = self.H_nhf\n self.out.V_pt[0] = self.V_pt\n self.out.V_eta[0] = self.V_eta\n self.out.V_phi[0] = self.V_phi\n self.out.V_mass[0] = self.V_mass\n self.out.VH_deltaR[0] = self.VH_deltaR\n self.out.X_pt[0] = self.X_pt\n self.out.X_eta[0] = self.X_eta\n self.out.X_phi[0] = self.X_phi\n self.out.X_mass[0] = self.X_mass\n self.out.X_mass_chs[0] = self.X_mass_chs\n self.out.X_mass_nom[0] = self.X_mass_nom\n self.out.X_mass_jesUp[0] = self.X_mass_jesUp\n self.out.X_mass_jesDown[0] = self.X_mass_jesDown\n self.out.X_mass_jerUp[0] = self.X_mass_jerUp\n self.out.X_mass_jerDown[0] = self.X_mass_jerDown\n self.out.X_mass_MET_nom[0] = self.X_mass_MET_nom\n self.out.X_mass_MET_jesUp[0] = self.X_mass_MET_jesUp\n self.out.X_mass_MET_jesDown[0] = self.X_mass_MET_jesDown\n self.out.X_mass_MET_jerUp[0] = self.X_mass_MET_jerUp\n self.out.X_mass_MET_jerDown[0] = self.X_mass_MET_jerDown\n self.out.H_mass_nom[0] = self.H_mass_nom\n self.out.H_mass_jmsUp[0] = self.H_mass_jmsUp\n self.out.H_mass_jmsDown[0] = self.H_mass_jmsDown\n self.out.H_mass_jmrUp[0] = self.H_mass_jmrUp\n self.out.H_mass_jmrDown[0] = self.H_mass_jmrDown\n self.out.tree.Fill()\n\n\n\n def analyze(self, event):\n \"\"\"process event, return True (go to next module) or False (fail, go to next event)\"\"\"\n ##### set variables ####\n self.nElectrons = 0\n self.nMuons = 0\n self.nTaus = 0\n self.nFatJets = 0\n self.EventWeight = 1.\n self.TopWeight = 1.\n self.BTagAK8Weight = 1.\n self.BTagAK4Weight = 1.\n self.BTagAK8Weight_deep = 1.\n self.BTagAK8Weight_deep_up = 1.\n self.BTagAK8Weight_deep_down = 1.\n self.BTagAK4Weight_deep = 1.\n self.BTagAK4Weight_deep_up = 1.\n self.BTagAK4Weight_deep_down = 1.\n self.BBTagWeight = 1.\n self.GenWeight = 1.\n self.PUWeight = 1.\n self.LeptonWeight = 1.\n self.LeptonWeightUp = 1.\n self.LeptonWeightDown = 1.\n self.TriggerWeight = 1.\n self.TriggerWeightUp = 1.\n self.TriggerWeightDown = 1.\n self.isZtoMM = False\n self.isZtoEE = False\n self.isZtoNN = False\n self.isTtoEM = False\n self.isBoosted4B = False\n self.isHtobb = False\n self.isHtobb_ml = False\n self.isMaxBTag_loose = False\n self.isMaxBTag_medium = False\n self.isMaxBTag_tight = False\n self.isVBF = False\n self.is2016 = False\n self.is2017 = False\n self.is2018 = False\n self.nTaus = 0\n self.nJetsNoFatJet = 0\n self.H_partonflavour = -1.\n self.H_hadronflavour = -1.\n self.DPhi = -1.\n self.VHDEta = -1.\n self.MinJetMetDPhi = 10.\n 
self.MaxJetNoFatJetBTag = -1.\n self.BtagDeepB = -1.\n self.DeepTagMD_H4qvsQCD = -1.\n self.DeepTagMD_HbbvsQCD = -1.\n self.DeepTagMD_ZHbbvsQCD = -1.\n self.DeepTagMD_ZbbvsQCD = -1.\n self.DeepTagMD_bbvsLight = -1.\n self.DeepTagMD_WvsQCD = -1.\n self.DeepTagMD_ZvsQCD = -1.\n self.Mu1_pt = -1.\n self.Mu1_eta = -1.\n self.Mu1_phi = -1.\n self.Mu1_mass = -1.\n self.Mu1_pfIsoId = -1.\n self.Mu1_relIso = -1.\n self.Mu1_highPtId = -1.\n self.Mu2_pt = -1.\n self.Mu2_eta = -1.\n self.Mu2_phi = -1.\n self.Mu2_mass = -1.\n self.Mu2_pfIsoId = -1.\n self.Mu2_relIso = -1.\n self.Mu2_highPtId = -1.\n self.Ele1_pt = -1.\n self.Ele1_eta = -1.\n self.Ele1_phi = -1.\n self.Ele1_mass = -1.\n self.Ele2_pt = -1.\n self.Ele2_eta = -1.\n self.Ele2_phi = -1.\n self.Ele2_mass = -1.\n self.Ele_HEM15_16 = -1.\n self.HT_HEM15_16 = -1.\n self.HT = 0.\n self.LHEScaleWeight = -1.\n self.LHEPdfWeight = -1.\n self.LHEWeight_originalXWGTUP = -1.\n self.PrefireWeight = 1.\n self.PrefireWeightUp = 1.\n self.PrefireWeightDown = 1.\n self.QCDNLO_Corr = 1.\n self.QCDNNLO_Corr = 1.\n self.EWKNLO_Corr = 1.\n self.Jet1_VBF_pt = -1.\n self.Jet1_VBF_eta = -1.\n self.Jet1_VBF_phi = -1.\n self.Jet1_VBF_mass = -1.\n self.Jet2_VBF_pt = -1.\n self.Jet2_VBF_eta = -1.\n self.Jet2_VBF_phi = -1.\n self.Jet2_VBF_mass = -1.\n self.dijet_VBF_mass = -1.\n self.deltaR_VBF = -1.\n self.deltaR_HVBFjet1 = -1.\n self.deltaR_HVBFjet2 = -1.\n self.H_pt = -1.\n self.H_eta = -1.\n self.H_phi = -1.\n self.H_mass = -1.\n self.H_M = -1.\n self.H_tau21 = -1.\n self.H_tau41 = -1.\n self.H_tau42 = -1.\n self.H_tau31 = -1.\n self.H_tau32 = -1.\n self.H_ddt = -1.\n self.H_csv1 = -1.\n self.H_csv2 = -1.\n self.H_deepcsv1 = -1.\n self.H_deepcsv2 = -1.\n self.H_dbt = -1.\n self.H_chf = -1.\n self.H_nhf = -1.\n self.V_pt = -1.\n self.V_eta = -1.\n self.V_phi = -1.\n self.V_mass = -1.\n self.VH_deltaR = -1.\n self.X_pt = -1.\n self.X_eta = -1.\n self.X_phi = -1.\n self.X_mass = -1.\n self.X_mass_chs = -1.\n self.X_mass_nom = -1.\n self.X_mass_jesUp = -1.\n self.X_mass_jesDown = -1.\n self.X_mass_jerUp = -1.\n self.X_mass_jerDown = -1.\n self.X_mass_MET_nom = -1.\n self.X_mass_MET_jesUp = -1.\n self.X_mass_MET_jesDown = -1.\n self.X_mass_MET_jerUp = -1.\n self.X_mass_MET_jerDown = -1.\n self.H_mass_nom = -1.\n self.H_mass_jmsUp = -1.\n self.H_mass_jmsDown = -1.\n self.H_mass_jmrUp = -1.\n self.H_mass_jmrDown = -1.\n\n \n \n eecutflow_list = []\n mmcutflow_list = []\n nncutflow_list = []\n\n idx_electrons = []\n idx_loose_electrons = []\n idx_muons = []\n idx_loose_muons = []\n idx_fatjet = []\n idx_jet = []\n idx_jet_vbf = []\n\n electrons_tlv_list = []\n loose_electrons_tlv_list = []\n muons_tlv_list = []\n loose_muons_tlv_list = []\n fatjet_tlv_list = []\n jet_tlv_list = []\n jet_tlv_list_vbf = []\n fatjet_tau21_list = []\n fatjet_tau41_list = []\n fatjet_tau42_list = []\n fatjet_tau31_list = []\n fatjet_tau32_list = []\n\n V = ROOT.TLorentzVector()\n H = ROOT.TLorentzVector()\n X = ROOT.TLorentzVector()\n\n V_chs = ROOT.TLorentzVector()\n ######### cuts #########\n elec1_pt_cut = 55.\n elec2_pt_cut = 20.\n elec_pt_cut = 10.\n elec_eta_cut = 2.5\n muon1_pt_cut = 55.\n muon2_pt_cut = 20. 
\n muon_pt_cut = 10.\n muon_eta_cut = 2.4\n tau_pt_cut = 18.\n tau_eta_cut = 2.3\n ak4_pt_cut = 30.\n ak4_eta_cut = 2.4\n fatjet_pt_cut = 200.\n fatjet_eta_cut = 2.4\n met_pt_cut = 250.\n v_pt_cut = 200.\n tau21_lowercut = 0.35\n tau21_uppercut = 0.75\n j_mass_lowercut = 30.\n j_mass_uppercut = 250.\n v_mass_lowercut = 65.\n v_mass_intercut = 85.\n v_mass_uppercut = 105.\n h_mass_lowercut = 105.\n h_mass_uppercut = 135.\n x_mass_lowercut = 750.\n xt_mass_lowercut = 650.\n xjj_mass_lowercut = 950.\n \n #### flag for year #######\n if self.year == 2016:\n self.is2016 = True\n elif self.year == 2017:\n self.is2017 = True\n elif self.year == 2018:\n self.is2018 = True\n \n \n ######### triggers #########\n if self.year == 2016:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu50])\n except:\n trigger_SingleMu = event.HLT_Mu50\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele27_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon175\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight])\n trigger_MET = any([event.HLT_PFMET170_NotCleaned,\n event.HLT_PFMET170_HBHECleaned])\n elif self.year == 2017:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n except:\n trigger_SingleMu = event.HLT_Mu50\n try:\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n except:\n trigger_SingleEle = None\n trigger_SingleIsoEle = event.HLT_Ele35_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n try:\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n except:\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n try:\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n except:\n trigger_MET = None\n\n elif self.year == 2018:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele32_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n 
event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n ########## Gen Weight #########\n if self.isMC:\n self.GenWeight = -1. if event.genWeight < 0 else 1.\n self.PUWeight = self.puTool.getWeight(event.Pileup_nTrueInt)\n self.EventWeight *= self.GenWeight\n self.EventWeight *= self.PUWeight\n for i,weight in enumerate(event.LHEScaleWeight):\n self.out.LHEScaleWeight_hist.Fill(i,weight)\n for j,weight in enumerate(event.LHEPdfWeight):\n self.out.LHEPdfWeight_hist.Fill(j,weight)\n self.LHEScaleWeight = event.LHEScaleWeight\n self.LHEPdfWeight = event.LHEPdfWeight\n self.LHEWeight_originalXWGTUP = event.LHEWeight_originalXWGTUP\n self.out.events.Fill(0.,self.GenWeight)\n self.out.original.Fill(0.,event.LHEWeight_originalXWGTUP)\n if self.year == 2016 or self.year == 2017:\n self.PrefireWeight = event.PrefireWeight\n self.PrefireWeightUp = event.PrefireWeight_Up\n self.PrefireWeightDown = event.PrefireWeight_Down\n \n if self.isData and event.PV_npvs == 0:\n return False\n if not self.isData:\n self.out.pileup.Fill(event.Pileup_nTrueInt)\n if event.Pileup_nTrueInt == 0:\n return False\n ########### FatJet #########\n for ifatjet in range(event.nFatJet):\n fatjet_pt = event.FatJet_pt[ifatjet]\n fatjet_eta = event.FatJet_eta[ifatjet]\n fatjet_phi = event.FatJet_phi[ifatjet]\n fatjet_mass = event.FatJet_mass[ifatjet]\n fatjet_jetid = event.FatJet_jetId[ifatjet]\n fatjet_tlv = ROOT.TLorentzVector()\n fatjet_tlv.SetPtEtaPhiM(fatjet_pt, fatjet_eta, fatjet_phi, fatjet_mass)\n if fatjet_pt > fatjet_pt_cut and abs(fatjet_eta) < fatjet_eta_cut:\n fatjet_tlv_list.append(fatjet_tlv)\n idx_fatjet.append(ifatjet)\n if event.FatJet_tau1[ifatjet]==0:\n fatjet_tau21_list.append(0)\n fatjet_tau41_list.append(0)\n fatjet_tau31_list.append(0)\n else:\n fatjet_tau21_list.append(event.FatJet_tau2[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau41_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau31_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau1[ifatjet])\n if event.FatJet_tau2[ifatjet]==0:\n fatjet_tau42_list.append(0)\n fatjet_tau32_list.append(0)\n else:\n fatjet_tau42_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau2[ifatjet])\n fatjet_tau32_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau2[ifatjet])\n self.nFatJets = len(fatjet_tlv_list)\n #stop if no suitable Fatjet\n if len(fatjet_tlv_list) == 0:\n return False \n ########### electrons ##########\n for ielectron in range(event.nElectron):\n electron_pt = event.Electron_pt[ielectron]\n electron_eta = event.Electron_eta[ielectron]\n electron_phi = event.Electron_phi[ielectron]\n electron_mass = event.Electron_mass[ielectron]\n electron_tlv = ROOT.TLorentzVector()\n electron_tlv.SetPtEtaPhiM(electron_pt,electron_eta,electron_phi,electron_mass)\n if electron_eta > -2.5 and electron_eta < -1.479 and electron_phi > -1.55 and electron_phi < -0.9:\n if self.Ele_HEM15_16 == -1.:\n self.Ele_HEM15_16 = 0.\n self.Ele_HEM15_16 += electron_pt\n if electron_pt > elec_pt_cut and abs(electron_eta) < elec_eta_cut:\n idx_electrons.append(ielectron)\n electrons_tlv_list.append(electron_tlv)\n if event.Electron_cutBased[ielectron] >= 2:\n idx_loose_electrons.append(ielectron)\n loose_electrons_tlv_list.append(electron_tlv)\n 
self.nElectrons = len(loose_electrons_tlv_list)\n \n ########### muons #########\n for imuon in range(event.nMuon):\n muon_pt = event.Muon_pt[imuon]\n muon_eta = event.Muon_eta[imuon]\n muon_phi = event.Muon_phi[imuon]\n muon_mass = event.Muon_mass[imuon]\n muon_tlv = ROOT.TLorentzVector()\n muon_tlv.SetPtEtaPhiM(muon_pt, muon_eta, muon_phi, muon_mass)\n if muon_pt > muon_pt_cut and abs(muon_eta) < muon_eta_cut:\n idx_muons.append(imuon)\n muons_tlv_list.append(muon_tlv)\n if event.Muon_isPFcand[imuon] and struct.unpack('B',event.Muon_pfIsoId[imuon])[0]>=2 and (event.Muon_isGlobal[imuon] or event.Muon_isTracker[imuon]):\n idx_loose_muons.append(imuon)\n loose_muons_tlv_list.append(muon_tlv)\n self.nMuons = len(loose_muons_tlv_list)\n\n\n ############ taus #########\n for itau in range(event.nTau):\n tau_pt = event.Tau_pt[itau]\n tau_eta = event.Tau_eta[itau]\n tau_phi = event.Tau_phi[itau]\n tau_mass = event.Tau_mass[itau]\n tau_tlv = ROOT.TLorentzVector()\n tau_tlv.SetPtEtaPhiM(tau_pt, tau_eta, tau_phi, tau_mass)\n if tau_pt > tau_pt_cut and abs(tau_eta) < tau_eta_cut:\n cleanTau = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n if cleanTau:\n self.nTaus += 1\n\n ############ MET ##########\n METx = 0.\n METy = 0.\n MET_tlv = ROOT.TLorentzVector()\n MET_tlv.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi, event.PuppiMET_pt)\n \n ############ TTbar pT reweighting ########\n if self.isMC and 'TT' in self.sample[0]:\n Top1_pt, Top2_pt = getTTPt(event)\n self.TopWeight = getTTptWeight(Top1_pt, Top2_pt)\n\n ############ ZtoEE ############\n self.out.eecutflow.Fill(0.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodelectronpair = False\n for i in idx_electrons:\n for j in idx_electrons:\n if i==j or event.Electron_charge[i] == event.Electron_charge[j]:\n continue\n eli_tlv = ROOT.TLorentzVector()\n eli_tlv.SetPtEtaPhiM(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i],event.Electron_mass[i])\n eli_v = ROOT.TVector3()\n eli_v.SetPtEtaPhi(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i])\n elj_tlv = ROOT.TLorentzVector()\n elj_tlv.SetPtEtaPhiM(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j],event.Electron_mass[j])\n elj_v = ROOT.TVector3()\n elj_v.SetPtEtaPhi(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j])\n diel = eli_tlv + elj_tlv\n Z_pt = diel.Pt()\n Z_m = diel.M()\n if Z_m > 70. and Z_m < 110. 
and Z_pt > maxZpt:\n maxZpt = Z_pt\n if eli_tlv.Pt() > elj_tlv.Pt():\n el1 = i\n el2 = j\n el1_tlv = eli_tlv\n el2_tlv = elj_tlv\n el1_v = eli_v\n el2_v = elj_v\n else:\n el1 = j\n el2 = i\n el1_tlv = elj_tlv\n el2_tlv = eli_tlv\n el1_v = elj_v\n el2_v = eli_v\n goodelectronpair = True\n \n \n if goodelectronpair:\n self.out.eecutflow.Fill(1.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if el1_tlv.Pt() > elec1_pt_cut and el2_tlv.Pt() > elec2_pt_cut:\n self.out.eecutflow.Fill(2.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if event.Electron_cutBased[el1] >= 2 and event.Electron_cutBased[el2] >= 2:\n self.out.eecutflow.Fill(3.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.eecutflow.Fill(4.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n #if not self.isMC and (\"SinglePhoton\" in self.sample[0] and (trigger_SingleEle or trigger_SingleIsoEle)):\n # print \"ZtoEE double counting\"\n # return False\n self.out.eecutflow.Fill(5.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if self.isMC:\n eltrig_tlv = el1_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==11:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # print \"electron TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==14336:\n # #if event.TrigObj_filterBits[i]==1110000000000000:\n # print \"found matching electron\"\n # deltaR1 = trigobj_v.DeltaR(el1_v)\n # deltaR2 = trigobj_v.DeltaR(el2_v)\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # eltrig_tlv = el2_tlv\n # break\n self.TriggerWeight = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightUp = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) + self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightDown = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) - self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())*self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1 = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2 = self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1error = self.elSFs.getIdIsoSFerror(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2error = self.elSFs.getIdIsoSFerror(el2_tlv.Pt(),el2_tlv.Eta())\n \n self.LeptonWeight = IdIsoSF1*IdIsoSF2\n LeptonWeightsigma = np.sqrt((IdIsoSF1error*IdIsoSF2)**2+(IdIsoSF2error*IdIsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n V = el1_tlv + el2_tlv\n self.Ele1_pt = el1_tlv.Pt()\n self.Ele1_eta = 
el1_tlv.Eta()\n self.Ele1_phi = el1_tlv.Phi()\n self.Ele1_mass = el1_tlv.M()\n self.Ele2_pt = el2_tlv.Pt()\n self.Ele2_eta = el2_tlv.Eta()\n self.Ele2_phi = el2_tlv.Phi()\n self.Ele2_mass = el2_tlv.M()\n self.isZtoEE = True\n\n ########## ZtoMM #############\n self.out.mmcutflow.Fill(0.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodmuonpair = False\n for i in idx_muons:\n for j in idx_muons:\n if i==j or event.Muon_charge[i] == event.Muon_charge[j]:\n continue\n mui_tlv = ROOT.TLorentzVector()\n mui_tlv.SetPtEtaPhiM(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i],event.Muon_mass[i])\n mui_v = ROOT.TVector3()\n mui_v.SetPtEtaPhi(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i])\n muj_tlv = ROOT.TLorentzVector()\n muj_tlv.SetPtEtaPhiM(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j],event.Muon_mass[j]) \n muj_v = ROOT.TVector3()\n muj_v.SetPtEtaPhi(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j])\n dimu = mui_tlv + muj_tlv\n Z_pt = dimu.Pt()\n Z_m = dimu.M()\n if Z_m > 70. and Z_m < 110. and Z_pt > maxZpt:\n maxZpt = Z_pt\n if mui_tlv.Pt() > muj_tlv.Pt():\n mu1 = i\n mu2 = j\n mu1_tlv = mui_tlv\n mu2_tlv = muj_tlv\n mu1_v = mui_v\n mu2_v = muj_v\n else:\n mu1 = j\n mu2 = i\n mu1_tlv = muj_tlv\n mu2_tlv = mui_tlv\n mu1_v = muj_v\n mu2_v = mui_v\n goodmuonpair = True\n \n\n if goodmuonpair:\n self.out.mmcutflow.Fill(1.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0] \n if mu1_tlv.Pt() > muon1_pt_cut and mu2_tlv.Pt() > muon2_pt_cut:\n self.out.mmcutflow.Fill(2.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if (mu1_highPtId >= 2 and mu2_highPtId >= 1) or (mu1_highPtId >= 1 and mu2_highPtId >= 2):\n self.out.mmcutflow.Fill(3.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.mmcutflow.Fill(4.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if not trigger_SingleMu:\n print \"ZtoMM trigger inconsistency\"\n return False\n self.out.mmcutflow.Fill(5.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if self.isMC:\n if mu1_highPtId >=2:\n mutrig_tlv = mu1_tlv\n else:\n mutrig_tlv = mu2_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==13:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # deltaR1 = trigobj_v.DeltaR(mu1_v)\n # deltaR2 = trigobj_v.DeltaR(mu2_v)\n # print \"muon TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==2048:\n # #if event.TrigObj_filterBits[i]==10000000000:\n # print \"found matching muon\"\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # mutrig_tlv = mu2_tlv\n # break\n\n self.TriggerWeight = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightUp = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) + self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightDown = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) - self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n IdSF1 = self.muSFs.getIdSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2 = self.muSFs.getIdSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1 = self.muSFs.getIsoSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2 = self.muSFs.getIsoSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IdSF1error = 
self.muSFs.getIdSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2error = self.muSFs.getIdSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1error = self.muSFs.getIsoSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2error = self.muSFs.getIsoSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n self.LeptonWeight = IdSF1*IdSF2*IsoSF1*IsoSF2\n LeptonWeightsigma = np.sqrt((IdSF1error*IdSF2*IsoSF1*IsoSF2)**2+(IdSF2error*IdSF1*IsoSF1*IsoSF2)**2+(IsoSF1error*IdSF1*IdSF2*IsoSF2)**2+(IsoSF2error*IdSF1*IdSF2*IsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n if mu1_tlv.DeltaR(mu2_tlv) < 0.3:\n try:\n self.Mu1_relIso = ((event.Muon_tkRelIso[mu1]*mu1_tlv.Pt()) - mu2_tlv.Pt())/mu1_tlv.Pt()\n self.Mu2_relIso = ((event.Muon_tkRelIso[mu2]*mu2_tlv.Pt()) - mu1_tlv.Pt())/mu2_tlv.Pt()\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n else:\n try:\n self.Mu1_relIso = event.Muon_tkRelIso[mu1]\n self.Mu2_relIso = event.Muon_tkRelIso[mu2]\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n V = mu1_tlv + mu2_tlv\n self.Mu1_pt = mu1_tlv.Pt()\n self.Mu1_eta = mu1_tlv.Eta()\n self.Mu1_phi = mu1_tlv.Phi()\n self.Mu1_mass = mu1_tlv.M()\n self.Mu1_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu1])[0]\n self.Mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n self.Mu2_pt = mu2_tlv.Pt()\n self.Mu2_eta = mu2_tlv.Eta()\n self.Mu2_phi = mu2_tlv.Phi()\n self.Mu2_mass = mu2_tlv.M()\n self.Mu2_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu2])[0]\n self.Mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0]\n self.isZtoMM = True\n\n \n ########### TtoEM ######### \n if not self.isZtoMM and not self.isZtoEE and self.nElectrons == 1 and self.nMuons == 1:\n if event.Electron_charge[idx_loose_electrons[0]] != event.Muon_charge[idx_loose_muons[0]]:\n el_tlv = loose_electrons_tlv_list[0]\n mu_tlv = loose_muons_tlv_list[0]\n if mu_tlv.Pt() > 30. 
and el_tlv.Pt() > 30.: \n V = mu_tlv + el_tlv\n if V.Pt() > 50.:\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n if self.isMC:\n self.TriggerWeight = self.elSFs.getTriggerSF(el_tlv.Pt(),el_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el_tlv.Pt(), el_tlv.Eta())\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n self.Mu1_pt = mu_tlv.Pt()\n self.Mu1_eta = mu_tlv.Eta()\n self.Mu1_phi = mu_tlv.Phi()\n self.Mu1_mass = mu_tlv.M()\n self.Ele1_pt = el_tlv.Pt()\n self.Ele1_eta = el_tlv.Eta()\n self.Ele1_phi = el_tlv.Phi()\n self.Ele1_mass = el_tlv.M()\n self.isTtoEM = True\n\n ######### ZtoNN ##########\n self.out.nncutflow.Fill(0.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if not self.isZtoMM and not self.isZtoEE and not self.isTtoEM:\n if event.PuppiMET_pt > met_pt_cut :\n self.out.nncutflow.Fill(1.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.nElectrons == 0 and self.nMuons == 0 and self.nTaus == 0:\n self.out.nncutflow.Fill(2.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n V.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi,event.PuppiMET_pt)\n V_chs.SetPtEtaPhiE(event.MET_pt,0.,event.MET_phi,event.MET_pt)\n if trigger_MET == None:\n if not self.isMC and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n else:\n if not self.isMC and not trigger_MET and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n self.out.nncutflow.Fill(3.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.filter(event) == False:\n print \"Bad event\"\n return False\n self.out.nncutflow.Fill(4.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.isMC:\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.TriggerWeight = 1.\n self.isZtoNN = True\n #stop if no semileptonic decays\n if self.isZtoEE==False and self.isZtoMM==False and self.isZtoNN==False and self.isTtoEM==False:\n return False\n ########## setting the Higgs and V index #######\n fatjet_idx_H = 0\n valid_Higgs = False\n if self.isZtoMM:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(mu1_tlv)>0.8 and fatjet_tlv.DeltaR(mu2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n\n elif self.isZtoEE:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(el1_tlv)>0.8 and fatjet_tlv.DeltaR(el2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n 
fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n \n elif self.isZtoNN:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n\n ############ AK4 Jet ###########\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n self.HT += jet_pt\n if jet_eta > -2.5 and jet_eta < -1.479 and jet_phi > -1.55 and jet_phi < -0.9:\n if self.HT_HEM15_16 == -1.:\n self.HT_HEM15_16 = 0.\n self.HT_HEM15_16 += jet_pt\n if jet_pt > ak4_pt_cut and abs(jet_eta) < ak4_eta_cut:\n cleanJet = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n if cleanJet and getJetID(self.year,event,ijet):\n if len(fatjet_tlv_list) > 0 and fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n jet_tlv_list.append(jet_tlv)\n idx_jet.append(ijet)\n\n ############ AK4 Jet check for VBF ###########\n if self.isZtoMM:\n lep1_tlv = mu1_tlv\n lep2_tlv = mu2_tlv\n if self.isZtoEE:\n lep1_tlv = el1_tlv\n lep2_tlv = el2_tlv\n \n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if abs(jet_eta) < 5.0:\n if len(fatjet_tlv_list) > 0:\n if fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n if getJetID(self.year,event,ijet) and event.Jet_puId[ijet]==7:\n if self.isZtoMM or self.isZtoEE:\n if jet_tlv.DeltaR(lep1_tlv)>0.4 and jet_tlv.DeltaR(lep2_tlv)>0.4:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n elif self.isZtoNN:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n\n idx1_vbf = -1\n idx2_vbf = -1\n maxVBFmass = -1.\n for ijet1, jet1_tlv in enumerate(jet_tlv_list_vbf):\n for ijet2, jet2_tlv in enumerate(jet_tlv_list_vbf):\n if ijet1 == ijet2: continue\n eta1 = jet_tlv_list_vbf[ijet1].Eta()\n eta2 = jet_tlv_list_vbf[ijet2].Eta()\n V_VBF = jet_tlv_list_vbf[ijet1]+jet_tlv_list_vbf[ijet2]\n VBFmass = V_VBF.M()\n if abs(eta1-eta2)>4.0 and eta1*eta2<0. 
and VBFmass>maxVBFmass:\n idx1_vbf = ijet1\n idx2_vbf = ijet2\n maxVBFmass = VBFmass\n \n\n self.dijet_VBF_mass = maxVBFmass\n if maxVBFmass > 500.: \n self.isVBF = True\n self.Jet1_VBF_pt = jet_tlv_list_vbf[idx1_vbf].Pt()\n self.Jet1_VBF_eta = jet_tlv_list_vbf[idx1_vbf].Eta()\n self.Jet1_VBF_phi = jet_tlv_list_vbf[idx1_vbf].Phi()\n self.Jet1_VBF_mass = jet_tlv_list_vbf[idx1_vbf].M()\n self.Jet2_VBF_pt = jet_tlv_list_vbf[idx2_vbf].Pt()\n self.Jet2_VBF_eta = jet_tlv_list_vbf[idx2_vbf].Eta()\n self.Jet2_VBF_phi = jet_tlv_list_vbf[idx2_vbf].Phi()\n self.Jet2_VBF_mass = jet_tlv_list_vbf[idx2_vbf].M()\n self.deltaR_VBF = jet_tlv_list_vbf[idx1_vbf].DeltaR(jet_tlv_list_vbf[idx2_vbf])\n self.deltaR_HVBFjet1 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx1_vbf]))\n self.deltaR_HVBFjet2 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx2_vbf]))\n\n ########## Higgs ######## \n H = fatjet_tlv_list[fatjet_idx_H]\n\n if self.runJEC:\n self.H_mass_nom = event.FatJet_msoftdrop_nom[fatjet_idx_H]\n self.H_mass_jmsUp = event.FatJet_msoftdrop_jmsUp[fatjet_idx_H]\n self.H_mass_jmsDown = event.FatJet_msoftdrop_jmsDown[fatjet_idx_H]\n self.H_mass_jmrUp = event.FatJet_msoftdrop_jmrUp[fatjet_idx_H]\n self.H_mass_jmrDown = event.FatJet_msoftdrop_jmrDown[fatjet_idx_H]\n self.H_pt_nom = event.FatJet_pt_nom[fatjet_idx_H]\n self.H_pt_jesUp = event.FatJet_pt_jesTotalUp[fatjet_idx_H]\n self.H_pt_jesDown = event.FatJet_pt_jesTotalDown[fatjet_idx_H]\n self.H_pt_jerUp = event.FatJet_pt_jerUp[fatjet_idx_H]\n self.H_pt_jerDown = event.FatJet_pt_jerDown[fatjet_idx_H]\n self.PuppiMET_pt_nom = event.PuppiMET_pt_nom\n self.PuppiMET_pt_jesUp = event.PuppiMET_pt_jesTotalUp\n self.PuppiMET_pt_jesDown = event.PuppiMET_pt_jesTotalDown\n self.PuppiMET_pt_jerUp = event.PuppiMET_pt_jerUp\n self.PuppiMET_pt_jerDown = event.PuppiMET_pt_jerDown\n \n H_Eta = H.Eta()\n H_Phi = H.Phi()\n H_M = H.M()\n H_nom = ROOT.TLorentzVector()\n H_jesUp = ROOT.TLorentzVector()\n H_jesDown = ROOT.TLorentzVector()\n H_jerUp = ROOT.TLorentzVector()\n H_jerDown = ROOT.TLorentzVector()\n H_nom.SetPtEtaPhiM(self.H_pt_nom,H_Eta,H_Phi,H_M)\n H_jesUp.SetPtEtaPhiM(self.H_pt_jesUp,H_Eta,H_Phi,H_M)\n H_jesDown.SetPtEtaPhiM(self.H_pt_jesDown,H_Eta,H_Phi,H_M)\n H_jerUp.SetPtEtaPhiM(self.H_pt_jerUp,H_Eta,H_Phi,H_M)\n H_jerDown.SetPtEtaPhiM(self.H_pt_jerDown,H_Eta,H_Phi,H_M)\n MET_nom = ROOT.TLorentzVector()\n MET_jesUp = ROOT.TLorentzVector()\n MET_jesDown = ROOT.TLorentzVector()\n MET_jerUp = ROOT.TLorentzVector()\n MET_jerDown = ROOT.TLorentzVector()\n MET_nom.SetPtEtaPhiM(self.PuppiMET_pt_nom,0.,event.PuppiMET_phi,self.PuppiMET_pt_nom)\n MET_jesUp.SetPtEtaPhiM(self.PuppiMET_pt_jesUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesUp)\n MET_jesDown.SetPtEtaPhiM(self.PuppiMET_pt_jesDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesDown)\n MET_jerUp.SetPtEtaPhiM(self.PuppiMET_pt_jerUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerUp)\n MET_jerDown.SetPtEtaPhiM(self.PuppiMET_pt_jerDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerDown)\n\n for ifatjet in idx_fatjet:\n if event.FatJet_btagHbb[ifatjet] > 0.3:\n self.isBoosted4B = True\n\n \n self.nJetsNoFatJet = len(jet_tlv_list)\n \n if self.isZtoNN:\n self.DPhi = abs(MET_tlv.DeltaPhi(H))\n else:\n self.DPhi = abs(V.DeltaPhi(H))\n \n self.VH_deltaR = H.DeltaR(V)\n \n jet_list_temp = []\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n 
jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if jet_tlv.DeltaR(H) < 0.8:\n jet_list_temp.append(ijet)\n if len(jet_list_temp) == 1:\n idx = jet_list_temp[0]\n self.H_chf = event.Jet_chHEF[idx]\n self.H_nhf = event.Jet_neHEF[idx]\n elif len(jet_list_temp) == 2:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n self.H_chf = (chf1*pt1+chf2*pt2)/(pt1+pt2) \n self.H_nhf = (nhf1*pt1+nhf2*pt2)/(pt1+pt2)\n elif len(jet_list_temp) == 3:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n idx3 = jet_list_temp[2]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n pt3 = event.Jet_pt[idx3]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n chf3 = event.Jet_chHEF[idx3]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n nhf3 = event.Jet_neHEF[idx3]\n self.H_chf = (chf1*pt1+chf2*pt2+chf3*pt3)/(pt1+pt2+pt3) \n self.H_nhf = (nhf1*pt1+nhf2*pt2+nhf3*pt3)/(pt1+pt2+pt3)\n\n\n\n for jet_tlv in jet_tlv_list:\n if abs(MET_tlv.DeltaPhi(jet_tlv)) < self.MinJetMetDPhi:\n self.MinJetMetDPhi = abs(MET_tlv.DeltaPhi(jet_tlv))\n\n\n for ijet in idx_jet:\n if event.Jet_btagDeepB[ijet] > self.MaxJetNoFatJetBTag:\n self.MaxJetNoFatJetBTag = event.Jet_btagDeepB[ijet]\n\n if not self.isData:\n for igenjet in range(event.nGenJetAK8):\n genjetAK8_tlv = ROOT.TLorentzVector()\n genjetAK8_tlv.SetPtEtaPhiM(event.GenJetAK8_pt[igenjet], event.GenJetAK8_eta[igenjet], event.GenJetAK8_phi[igenjet], event.GenJetAK8_mass[igenjet])\n if H.DeltaR(genjetAK8_tlv) < 0.8:\n self.H_hadronflavour = struct.unpack('B',event.GenJetAK8_hadronFlavour[igenjet])[0]\n self.H_partonflavour = event.GenJetAK8_partonFlavour[igenjet]\n self.btagToolAK4_deep.fillEfficiencies(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep = self.btagToolAK4_deep.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_up = self.btagToolAK4_deep_up.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_down = self.btagToolAK4_deep_down.getWeight(event,idx_jet,fatjet_idx_H)\n #search for AK4 jets which match with the subjets from the H\n ak4_subjets = []\n subjet1 = TLorentzVector()\n subjet2 = TLorentzVector()\n subjet1_idx = event.FatJet_subJetIdx1[fatjet_idx_H]\n subjet2_idx = event.FatJet_subJetIdx2[fatjet_idx_H]\n if subjet1_idx>=0. 
and subjet2_idx>=0.:\n subjet1.SetPtEtaPhiM(event.SubJet_pt[subjet1_idx],event.SubJet_eta[subjet1_idx],event.SubJet_phi[subjet1_idx],event.SubJet_mass[subjet1_idx])\n subjet2.SetPtEtaPhiM(event.SubJet_pt[subjet2_idx],event.SubJet_eta[subjet2_idx],event.SubJet_phi[subjet2_idx],event.SubJet_mass[subjet2_idx])\n for jetid in range(event.nJet):\n ak4jet = TLorentzVector()\n ak4jet.SetPtEtaPhiM(event.Jet_pt[jetid],event.Jet_eta[jetid],event.Jet_phi[jetid],event.Jet_mass[jetid])\n if ak4jet.DeltaR(subjet1)<0.4:\n ak4_subjets.append(jetid)\n if ak4jet.DeltaR(subjet2)<0.4:\n ak4_subjets.append(jetid)\n self.btagToolAK8_deep.fillEfficiencies(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep = self.btagToolAK8_deep.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_up = self.btagToolAK8_deep_up.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_down = self.btagToolAK8_deep_down.getWeight(event,ak4_subjets,fatjet_idx_H)\n ########### X and variables ############\n X = V + H\n if self.isZtoNN:\n X_chs = V_chs + H\n self.X_mass_chs = X_chs.M()\n\n if self.runJEC:\n X_nom = V + H_nom\n X_jesUp = V + H_jesUp\n X_jesDown = V + H_jesDown\n X_jerUp = V + H_jerUp\n X_jerDown = V + H_jerDown\n X_MET_nom = MET_nom + H_nom\n X_MET_jesUp = MET_jesUp + H_jesUp\n X_MET_jesDown = MET_jesDown + H_jesDown\n X_MET_jerUp = MET_jerUp + H_jerUp\n X_MET_jerDown = MET_jerDown + H_jerDown\n self.X_mass_nom = X_nom.M()\n self.X_mass_jesUp = X_jesUp.M()\n self.X_mass_jesDown = X_jesDown.M()\n self.X_mass_jerUp = X_jerUp.M()\n self.X_mass_jerDown = X_jerDown.M()\n self.X_mass_MET_nom = X_MET_nom.M()\n self.X_mass_MET_jesUp = X_MET_jesUp.M()\n self.X_mass_MET_jesDown = X_MET_jesDown.M()\n self.X_mass_MET_jerUp = X_MET_jerUp.M()\n self.X_mass_MET_jerDown = X_MET_jerDown.M()\n\n self.V_pt = V.Pt()\n self.V_eta = V.Eta()\n self.V_phi = V.Phi()\n self.V_mass = V.M()\n \n if self.isZtoNN:\n self.V_mass = 0.\n\n self.H_pt = H.Pt()\n self.H_eta = H.Eta()\n self.H_phi = H.Phi()\n self.H_M = H.M()\n self.H_mass = event.FatJet_msoftdrop[fatjet_idx_H]\n self.X_pt = X.Pt()\n self.X_eta = X.Eta()\n self.X_phi = X.Phi()\n self.X_mass = X.M()\n\n\n self.H_dbt = event.FatJet_btagHbb[fatjet_idx_H]\n self.BtagDeepB = event.FatJet_btagDeepB[fatjet_idx_H]\n self.DeepTagMD_H4qvsQCD = event.FatJet_deepTagMD_H4qvsQCD[fatjet_idx_H]\n self.DeepTagMD_HbbvsQCD = event.FatJet_deepTagMD_HbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZHbbvsQCD = event.FatJet_deepTagMD_ZHbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZbbvsQCD = event.FatJet_deepTagMD_ZbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_bbvsLight = event.FatJet_deepTagMD_bbvsLight[fatjet_idx_H]\n self.DeepTagMD_WvsQCD = event.FatJet_deepTagMD_WvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZvsQCD = event.FatJet_deepTagMD_ZvsQCD[fatjet_idx_H]\n self.H_tau21 = fatjet_tau21_list[fatjet_idx_H]\n self.H_tau41 = fatjet_tau41_list[fatjet_idx_H]\n self.H_tau42 = fatjet_tau42_list[fatjet_idx_H]\n self.H_tau31 = fatjet_tau31_list[fatjet_idx_H]\n self.H_tau32 = fatjet_tau32_list[fatjet_idx_H]\n self.VHDEta = abs(V.Eta() - H.Eta())\n\n \n \n if event.FatJet_subJetIdx1[fatjet_idx_H] >= 0:\n Hcsv1 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx1[fatjet_idx_H]]\n Hdeepcsv1 = event.SubJet_btagDeepB[event.FatJet_subJetIdx1[fatjet_idx_H]]\n else:\n Hcsv1 = -1.\n Hdeepcsv1 = -1.\n if event.FatJet_subJetIdx2[fatjet_idx_H] >= 0:\n Hcsv2 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx2[fatjet_idx_H]]\n Hdeepcsv2 = event.SubJet_btagDeepB[event.FatJet_subJetIdx2[fatjet_idx_H]]\n else:\n Hcsv2 = -1.\n Hdeepcsv2 = 
-1.\n \n self.H_csv1 = max(Hcsv1,Hcsv2)\n self.H_csv2 = min(Hcsv1,Hcsv2)\n self.H_deepcsv1 = max(Hdeepcsv1,Hdeepcsv2)\n self.H_deepcsv2 = min(Hdeepcsv1,Hdeepcsv2)\n\n\n if self.year == 2016:\n wp_loose = 0.2217\n wp_medium = 0.6321\n wp_tight = 0.8953\n elif self.year == 2017:\n wp_loose = 0.1522\n wp_medium = 0.4941\n wp_tight = 0.8001\n elif self.year == 2018:\n wp_loose = 0.1241\n wp_medium = 0.4184\n wp_tight = 0.7527\n\n if self.H_deepcsv2 > wp_loose:\n self.isHtobb = True\n if self.H_deepcsv1 > wp_medium and self.H_deepcsv2 > wp_loose:\n self.isHtobb_ml = True\n\n if self.MaxJetNoFatJetBTag > wp_loose:\n self.isMaxBTag_loose = True\n if self.MaxJetNoFatJetBTag > wp_medium:\n self.isMaxBTag_medium = True\n if self.MaxJetNoFatJetBTag > wp_tight:\n self.isMaxBTag_tight = True\n\n \n if self.H_mass != 0.:\n self.H_ddt = self.H_tau21 + 0.082 *np.log(self.H_mass*self.H_mass/self.H_pt)\n else:\n self.H_ddt = -1.\n \n self.X_tmass = np.sqrt(2.*V.Pt()*fatjet_tlv_list[fatjet_idx_H].Pt()*(1.-np.cos(fatjet_tlv_list[fatjet_idx_H].DeltaPhi(V))))\n if self.isZtoNN:\n self.X_mass = self.X_tmass\n else:\n self.X_mass = X.M()\n if self.X_mass > 750 and self.VH_deltaR > 2:\n if self.MinJetMetDPhi>0.5 and self.DPhi>2:\n for i,weight in enumerate(nncutflow_list):\n self.out.nncutflow_inc.Fill(i,weight)\n if self.VHDEta<1.3:\n for i,weight in enumerate(eecutflow_list):\n self.out.eecutflow_inc.Fill(i,weight)\n for i,weight in enumerate(mmcutflow_list):\n self.out.mmcutflow_inc.Fill(i,weight)\n \n if self.isZtoEE or self.isZtoMM or self.isZtoNN or self.isTtoEM:\n self.fillBranches(event)\n return True\n \n\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
import logging
from django.shortcuts import render, redirect, HttpResponse
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.hashers import make_password
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.db import connection
from django.db.models import Count
from models import *
from forms import *
import json
logger = logging.getLogger('blog.views')
# Create your views here.
def global_setting(request):
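    # Template context processor: makes site-wide data available to every template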
    # Basic site information
SITE_URL = settings.SITE_URL
SITE_NAME = settings.SITE_NAME
SITE_DESC = settings.SITE_DESC
    # Category list (navigation data)
category_list = Category.objects.all()[:6]
    # Article archive data
archive_list = Article.objects.distinct_date()
    # Comment ranking: articles ordered by number of comments
comment_count_list = Comment.objects.values('article').annotate(comment_count=Count('article')).order_by('-comment_count')
article_comment_list = [Article.objects.get(pk=comment['article']) for comment in comment_count_list]
return locals()
def index(request):
try:
        # Latest articles
article_list = Article.objects.all()
article_list = getPage(request, article_list)
        # Article archive
        # 1. First collect the year-month values (e.g. 2015/06) present in the articles
        # Using values() and distinct() to drop duplicates (does not work)
        # print Article.objects.values('date_publish').distinct()
        # What about executing raw SQL directly?
        # First approach (does not work)
        # archive_list = Article.objects.raw('SELECT id, DATE_FORMAT(date_publish, "%%Y-%%m") as col_date FROM blog_article ORDER BY date_publish')
        # for archive in archive_list:
        #     print archive
        # Second approach (not recommended)
        # cursor = connection.cursor()
        # cursor.execute("SELECT DISTINCT DATE_FORMAT(date_publish, '%Y-%m') as col_date FROM blog_article ORDER BY date_publish")
        # row = cursor.fetchall()
        # print row
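        # A cleaner alternative (untested sketch) would be Django's built-in dates(),
        # which returns distinct dates truncated to the requested precision:
        # archive_list = Article.objects.dates('date_publish', 'month', order='DESC')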
except Exception as e:
print e
logger.error(e)
return render(request, 'index.html', locals())
def archive(request):
try:
        # First read the parameters submitted by the client
year = request.GET.get('year', None)
month = request.GET.get('month', None)
article_list = Article.objects.filter(date_publish__icontains=year+'-'+month)
article_list = getPage(request, article_list)
except Exception as e:
logger.error(e)
return render(request, 'archive.html', locals())
# Query the article list for a given tag (stub, not yet implemented)
def tag(request):
try:
pass
except Exception as e:
logger.error(e)
return render(request, 'archive.html', locals())
# Pagination helper
def getPage(request, article_list):
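    # Two articles per page; any invalid page number falls back to page 1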
paginator = Paginator(article_list, 2)
try:
page = int(request.GET.get('page', 1))
article_list = paginator.page(page)
except (EmptyPage, InvalidPage, PageNotAnInteger):
article_list = paginator.page(1)
return article_list
# Article detail
def article(request):
try:
        # Get the article id
id = request.GET.get('id', None)
try:
            # Fetch the article
article = Article.objects.get(pk=id)
except Article.DoesNotExist:
            return render(request, 'failure.html', {'reason': 'No matching article found'})
        # Comment form (pre-filled with the logged-in user's info when available)
        comment_form = CommentForm({'author': request.user.username,
                                    'email': request.user.email,
                                    'url': request.user.url,
                                    'article': id} if request.user.is_authenticated() else {'article': id})
        # Fetch comments for this article
comments = Comment.objects.filter(article=article).order_by('id')
comment_list = []
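        # Build a two-level thread: top-level comments (pid is None) collect their
        # replies in a children_comment attribute set on the fly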
for comment in comments:
for item in comment_list:
if not hasattr(item, 'children_comment'):
setattr(item, 'children_comment', [])
if comment.pid == item:
item.children_comment.append(comment)
break
if comment.pid is None:
comment_list.append(comment)
except Exception as e:
print e
logger.error(e)
return render(request, 'article.html', locals())
# Submit a comment
def comment_post(request):
try:
comment_form = CommentForm(request.POST)
if comment_form.is_valid():
            # Read the form data
comment = Comment.objects.create(username=comment_form.cleaned_data["author"],
email=comment_form.cleaned_data["email"],
url=comment_form.cleaned_data["url"],
content=comment_form.cleaned_data["comment"],
article_id=comment_form.cleaned_data["article"],
user=request.user if request.user.is_authenticated() else None)
comment.save()
else:
return render(request, 'failure.html', {'reason': comment_form.errors})
except Exception as e:
logger.error(e)
return redirect(request.META['HTTP_REFERER'])
# Logout
def do_logout(request):
try:
logout(request)
except Exception as e:
print e
logger.error(e)
return redirect(request.META['HTTP_REFERER'])
# Registration
def do_reg(request):
try:
if request.method == 'POST':
reg_form = RegForm(request.POST)
if reg_form.is_valid():
                # Register the user
user = User.objects.create(username=reg_form.cleaned_data["username"],
email=reg_form.cleaned_data["email"],
url=reg_form.cleaned_data["url"],
password=make_password(reg_form.cleaned_data["password"]),)
user.save()
                # Log the user in
                user.backend = 'django.contrib.auth.backends.ModelBackend' # specify the default authentication backend
login(request, user)
return redirect(request.POST.get('source_url'))
else:
return render(request, 'failure.html', {'reason': reg_form.errors})
else:
reg_form = RegForm()
except Exception as e:
logger.error(e)
return render(request, 'reg.html', locals())
# Login
def do_login(request):
try:
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
                # Log the user in
username = login_form.cleaned_data["username"]
password = login_form.cleaned_data["password"]
user = authenticate(username=username, password=password)
if user is not None:
                    user.backend = 'django.contrib.auth.backends.ModelBackend' # specify the default authentication backend
login(request, user)
else:
return render(request, 'failure.html', {'reason': '登录验证失败'})
return redirect(request.POST.get('source_url'))
else:
return render(request, 'failure.html', {'reason': login_form.errors})
else:
login_form = LoginForm()
except Exception as e:
logger.error(e)
return render(request, 'login.html', locals())
def category(request):
try:
        # First read the client-submitted parameters
cid = request.GET.get('cid', None)
try:
category = Category.objects.get(pk=cid)
except Category.DoesNotExist:
return render(request, 'failure.html', {'reason': '分类不存在'})
article_list = Article.objects.filter(category=category)
article_list = getPage(request, article_list)
except Exception as e:
logger.error(e)
return render(request, 'category.html', locals())
|
normal
|
{
"blob_id": "0b1e6a95ee008c594fdcff4e216708c003c065c8",
"index": 4873,
"step-1": "# -*- coding: utf-8 -*-\nimport logging\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.contrib.auth import logout, login, authenticate\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger\nfrom django.db import connection\nfrom django.db.models import Count\nfrom models import *\nfrom forms import *\nimport json\n\nlogger = logging.getLogger('blog.views')\n\n# Create your views here.\ndef global_setting(request):\n # 站点基本信息\n SITE_URL = settings.SITE_URL\n SITE_NAME = settings.SITE_NAME\n SITE_DESC = settings.SITE_DESC\n # 分类信息获取(导航数据)\n category_list = Category.objects.all()[:6]\n # 文章归档数据\n archive_list = Article.objects.distinct_date()\n 行\n comment_count_list = Comment.objects.values('article').annotate(comment_count=Count('article')).order_by('-comment_count')\n article_comment_list = [Article.objects.get(pk=comment['article']) for comment in comment_count_list]\n return locals()\n\ndef index(request):\n try:\n # 最新文章数据\n article_list = Article.objects.all()\n article_list = getPage(request, article_list)\n # 文章归档\n # 1、先要去获取到文章中有的 年份-月份 2015/06文章归档\n # 使用values和distinct去掉重复数据(不可行)\n # print Article.objects.values('date_publish').distinct()\n # 直接执行原生sql呢?\n # 第一种方式(不可行)\n # archive_list =Article.objects.raw('SELECT id, DATE_FORMAT(date_publish, \"%%Y-%%m\") as col_date FROM blog_article ORDER BY date_publish')\n # for archive in archive_list:\n # print archive\n # 第二种方式(不推荐)\n # cursor = connection.cursor()\n # cursor.execute(\"SELECT DISTINCT DATE_FORMAT(date_publish, '%Y-%m') as col_date FROM blog_article ORDER BY date_publish\")\n # row = cursor.fetchall()\n # print row\n except Exception as e:\n print e\n logger.error(e)\n return render(request, 'index.html', locals())\n\ndef archive(request):\n try:\n # 先获取客户端提交的信息\n year = request.GET.get('year', None)\n month = request.GET.get('month', None)\n article_list = Article.objects.filter(date_publish__icontains=year+'-'+month)\n article_list = getPage(request, article_list)\n except Exception as e:\n logger.error(e)\n return render(request, 'archive.html', locals())\n\n# 按标签查询对应的文章列表\ndef tag(request):\n try:\n \n pass\n\n except Exception as e:\n logger.error(e)\n return render(request, 'archive.html', locals())\n\n# 分页代码\ndef getPage(request, article_list):\n paginator = Paginator(article_list, 2)\n try:\n page = int(request.GET.get('page', 1))\n article_list = paginator.page(page)\n except (EmptyPage, InvalidPage, PageNotAnInteger):\n article_list = paginator.page(1)\n return article_list\n\n# 文章详情\ndef article(request):\n try:\n # 获取文章id\n id = request.GET.get('id', None)\n try:\n # 获取文章信息\n article = Article.objects.get(pk=id)\n except Article.DoesNotExist:\n return render(request, 'failure.html', {'reason': '没有找到对应的文章'})\n\n # 评论表单\n comment_form = CommentForm({'author': request.user.username,\n 'email': request.user.email,\n 'url': request.user.url,\n 'article': id} if request.user.is_authenticated() else{'article': id})\n # 获取评论信息\n comments = Comment.objects.filter(article=article).order_by('id')\n comment_list = []\n for comment in comments:\n for item in comment_list:\n if not hasattr(item, 'children_comment'):\n setattr(item, 'children_comment', [])\n if comment.pid == item:\n item.children_comment.append(comment)\n break\n if comment.pid is None:\n comment_list.append(comment)\n except Exception as e:\n print e\n 
logger.error(e)\n return render(request, 'article.html', locals())\n\n# 提交评论\ndef comment_post(request):\n try:\n comment_form = CommentForm(request.POST)\n if comment_form.is_valid():\n #获取表单信息\n comment = Comment.objects.create(username=comment_form.cleaned_data[\"author\"],\n email=comment_form.cleaned_data[\"email\"],\n url=comment_form.cleaned_data[\"url\"],\n content=comment_form.cleaned_data[\"comment\"],\n article_id=comment_form.cleaned_data[\"article\"],\n user=request.user if request.user.is_authenticated() else None)\n comment.save()\n else:\n return render(request, 'failure.html', {'reason': comment_form.errors})\n except Exception as e:\n logger.error(e)\n return redirect(request.META['HTTP_REFERER'])\n\n# 注销\ndef do_logout(request):\n try:\n logout(request)\n except Exception as e:\n print e\n logger.error(e)\n return redirect(request.META['HTTP_REFERER'])\n\n# 注册\ndef do_reg(request):\n try:\n if request.method == 'POST':\n reg_form = RegForm(request.POST)\n if reg_form.is_valid():\n # 注册\n user = User.objects.create(username=reg_form.cleaned_data[\"username\"],\n email=reg_form.cleaned_data[\"email\"],\n url=reg_form.cleaned_data[\"url\"],\n password=make_password(reg_form.cleaned_data[\"password\"]),)\n user.save()\n\n # 登录\n user.backend = 'django.contrib.auth.backends.ModelBackend' # 指定默认的登录验证方式\n login(request, user)\n return redirect(request.POST.get('source_url'))\n else:\n return render(request, 'failure.html', {'reason': reg_form.errors})\n else:\n reg_form = RegForm()\n except Exception as e:\n logger.error(e)\n return render(request, 'reg.html', locals())\n\n# 登录\ndef do_login(request):\n try:\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n # 登录\n username = login_form.cleaned_data[\"username\"]\n password = login_form.cleaned_data[\"password\"]\n user = authenticate(username=username, password=password)\n if user is not None:\n user.backend = 'django.contrib.auth.backends.ModelBackend' # 指定默认的登录验证方式\n login(request, user)\n else:\n return render(request, 'failure.html', {'reason': '登录验证失败'})\n return redirect(request.POST.get('source_url'))\n else:\n return render(request, 'failure.html', {'reason': login_form.errors})\n else:\n login_form = LoginForm()\n except Exception as e:\n logger.error(e)\n return render(request, 'login.html', locals())\n\ndef category(request):\n try:\n # 先获取客户端提交的信息\n cid = request.GET.get('cid', None)\n try:\n category = Category.objects.get(pk=cid)\n except Category.DoesNotExist:\n return render(request, 'failure.html', {'reason': '分类不存在'})\n article_list = Article.objects.filter(category=category)\n article_list = getPage(request, article_list)\n except Exception as e:\n logger.error(e)\n return render(request, 'category.html', locals())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
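# Decorator that primes a coroutine by advancing it to its first yield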
def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr) #cr.send(None)
return cr
return start_coroutine
@coroutine
def grep(pattern):
print('start grep')
try:
while True:
line = yield
if pattern in line:
print(line)
except GeneratorExit:
print('stop grep')
@coroutine
def grep_python_coroutine():
g = grep('python')
yield from g
g = grep('python')
#next(g) #g.send(None)
g.send("php is better")
g.send("python is simplier")
g.close()
|
normal
|
{
"blob_id": "bebe098c5abb579eb155a1dc325347d100ddfa8f",
"index": 1805,
"step-1": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n<mask token>\n",
"step-2": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n<mask token>\n",
"step-3": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n@coroutine\ndef grep_python_coroutine():\n g = grep('python')\n yield from g\n\n\n<mask token>\n",
"step-4": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n@coroutine\ndef grep_python_coroutine():\n g = grep('python')\n yield from g\n\n\n<mask token>\ng.send('php is better')\ng.send('python is simplier')\ng.close()\n",
"step-5": "def coroutine(func):\n\tdef start_coroutine(*args, **kwargs):\n\t\tcr = func(*args, **kwargs)\n\t\tnext(cr) #cr.send(None)\n\t\treturn cr\n\treturn start_coroutine\n\n@coroutine\ndef grep(pattern):\n\tprint('start grep')\n\ttry:\n\t\twhile True:\n\t\t\tline = yield\n\t\t\tif pattern in line:\n\t\t\t\tprint(line)\n\texcept GeneratorExit:\n\t\tprint('stop grep')\n\n@coroutine\ndef grep_python_coroutine():\n\tg = grep('python') \n\tyield from g\n\ng = grep('python')\n#next(g) #g.send(None)\ng.send(\"php is better\")\ng.send(\"python is simplier\")\ng.close()",
"step-ids": [
1,
2,
3,
4,
6
]
}
|
[
1,
2,
3,
4,
6
] |
from django.contrib import admin
from .models import Recipe, Ingredient, ChosenIngredient, timezone
# Register your models here.
admin.site.register(Ingredient)
admin.site.site_header = "Chef's Apprentice Admin"
admin.site.site_title = "Chef's Apprentice Admin Portal"
admin.site.index_title = "Welcome to Chef's Apprentice Admin Portal"
class ChosenIngredientInLine(admin.TabularInline):
model = ChosenIngredient
# Defines what is shown for Recipe in the admin site
class RecipeAdmin(admin.ModelAdmin):
list_display = ("title", "visible", "author")
actions = ["make_visible", "make_hidden", "delete_selected"]
exclude = ('date_posted', 'ingredients')
inlines = [
ChosenIngredientInLine,
]
class Meta:
model = Recipe
    # Register make_visible and make_hidden as actions in the admin site
def make_visible(self, request, queryset):
queryset.update(visible=True)
queryset.update(date_posted=timezone.now())
def make_hidden(self, request, queryset):
queryset.update(visible=False)
# Make these models visible in the admin site
admin.site.register(Recipe, RecipeAdmin)
|
normal
|
{
"blob_id": "65bb3743ca569c295d85016c82c4f6f043778d3f",
"index": 8848,
"step-1": "<mask token>\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n <mask token>\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = 'title', 'visible', 'author'\n actions = ['make_visible', 'make_hidden', 'delete_selected']\n exclude = 'date_posted', 'ingredients'\n inlines = [ChosenIngredientInLine]\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n model = ChosenIngredient\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = 'title', 'visible', 'author'\n actions = ['make_visible', 'make_hidden', 'delete_selected']\n exclude = 'date_posted', 'ingredients'\n inlines = [ChosenIngredientInLine]\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\n<mask token>\n",
"step-4": "<mask token>\nadmin.site.register(Ingredient)\nadmin.site.site_header = \"Chef's Apprentice Admin\"\nadmin.site.site_title = \"Chef's Apprentice Admin Portal\"\nadmin.site.index_title = \"Welcome to Chef's Apprentice Admin Portal\"\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n model = ChosenIngredient\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = 'title', 'visible', 'author'\n actions = ['make_visible', 'make_hidden', 'delete_selected']\n exclude = 'date_posted', 'ingredients'\n inlines = [ChosenIngredientInLine]\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\nadmin.site.register(Recipe, RecipeAdmin)\n",
"step-5": "from django.contrib import admin\nfrom .models import Recipe, Ingredient, ChosenIngredient, timezone\n\n# Register your models here.)\nadmin.site.register(Ingredient)\nadmin.site.site_header = \"Chef's Apprentice Admin\"\nadmin.site.site_title = \"Chef's Apprentice Admin Portal\"\nadmin.site.index_title = \"Welcome to Chef's Apprentice Admin Portal\"\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n model = ChosenIngredient\n\n# definerer hva som skal vises på Recipe displayet i admin siden\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = (\"title\", \"visible\", \"author\")\n actions = [\"make_visible\", \"make_hidden\", \"delete_selected\"]\n exclude = ('date_posted', 'ingredients')\n inlines = [\n ChosenIngredientInLine,\n ]\n\n class Meta:\n model = Recipe\n\n # funksjon for å sette make_visible og hidden som actions i admin siden\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n# synliggjør disse modellene i admin-siden\nadmin.site.register(Recipe, RecipeAdmin)\n",
"step-ids": [
3,
5,
6,
8,
10
]
}
|
[
3,
5,
6,
8,
10
] |
<|reserved_special_token_0|>
class Submit(webapp2.RequestHandler):
<|reserved_special_token_0|>
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Contact(db.Expando):
<|reserved_special_token_0|>
pid = db.StringProperty(required=True)
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
""" Home page handler """
def get(self):
""" Show home page """
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'Welcome %s!' % (contact.name,)
else:
contact = 'Invalid dhs.sg user'
greeting = ''
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
contact = 'Not authorised'
greeting = 'You need to'
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
""" Submit form """
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.
path.dirname(__file__)))
class Contact(db.Expando):
""" User data model """
pid = db.StringProperty(required=True)
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
""" Home page handler """
def get(self):
""" Show home page """
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'Welcome %s!' % (contact.name,)
else:
contact = 'Invalid dhs.sg user'
greeting = ''
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
contact = 'Not authorised'
greeting = 'You need to'
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
""" Submit form """
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
contact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',
email='lim.ahseng@dhs.sg', handphone='', tickets_csjh='', tickets_edssh
='', remark='')
contact2.put()
app = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],
debug=True)
<|reserved_special_token_1|>
import webapp2
import jinja2
import os
import csv
from google.appengine.api import users
from google.appengine.ext import db
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.
path.dirname(__file__)))
class Contact(db.Expando):
""" User data model """
pid = db.StringProperty(required=True)
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
""" Home page handler """
def get(self):
""" Show home page """
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'Welcome %s!' % (contact.name,)
else:
contact = 'Invalid dhs.sg user'
greeting = ''
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
contact = 'Not authorised'
greeting = 'You need to'
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
""" Submit form """
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
contact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',
email='lim.ahseng@dhs.sg', handphone='', tickets_csjh='', tickets_edssh
='', remark='')
contact2.put()
app = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],
debug=True)
<|reserved_special_token_1|>
#!/usr/bin/env python
import webapp2 # web application framework
import jinja2 # template engine
import os # access file system
import csv
from google.appengine.api import users # Google account authentication
from google.appengine.ext import db # datastore
# initialise template
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class Contact(db.Expando): # allows for different number of fields
''' User data model '''
pid = db.StringProperty(required=True) # string = 500 char, allow field to be indexed, perform faster
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
''' Home page handler '''
def get(self):
''' Show home page '''
# import data
# check if valid Google account
# school_register = csv.reader(open('data.csv'),delimiter=',')
# found = False
user = users.get_current_user()
# for student in school_register: # if valid logged in user
# if student[0] == self.request.get('pid'):
# contact = student
# found = True
# break
if user:
# logout link
url = users.create_logout_url(self.request.uri)
# logout text
url_linktext = 'Logout'
# retrieve user record from datastore
# may get multiple records, so in order to get one record:
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result: #if user record found
contact = result[0]
greeting = ("Welcome %s!" % (contact.name,)) #1 item in couple = put comma
else: #not found
contact = "Invalid dhs.sg user"
greeting = ""
else: # not logged in
# login link
url = users.create_login_url(self.request.uri)
# login text
url_linktext = 'Login'
contact = "Not authorised"
greeting = "You need to"
template_values = {
'contact': contact,
'greeting': greeting,
'url': url,
'url_linktext': url_linktext,
}
# create index.html template
template = jinja_environment.get_template('index.html')
# associate template values with template
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
''' Submit form '''
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = ("User: %s" % (contact.name,))
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {
'contact': contact,
'greeting': greeting,
'url': url,
'url_linktext': url_linktext,
'contact.handphone': updated_handphone,
'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark,
}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
# main
contact2 = Contact(pid = 'lim.ahseng', name = 'Lim Ah Seng', class12 = '5C99', email = 'lim.ahseng@dhs.sg', handphone = '', tickets_csjh = '', tickets_edssh = '', remark = '')
contact2.put()
app = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],
debug=True)
|
flexible
|
{
"blob_id": "aeef27d667f95e3818f73533439385ea949b96a4",
"index": 2445,
"step-1": "<mask token>\n\n\nclass Submit(webapp2.RequestHandler):\n <mask token>\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Contact(db.Expando):\n <mask token>\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\n<mask token>\n",
"step-3": "<mask token>\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)))\n\n\nclass Contact(db.Expando):\n \"\"\" User data model \"\"\"\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\ncontact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',\n email='lim.ahseng@dhs.sg', handphone='', tickets_csjh='', tickets_edssh\n ='', remark='')\ncontact2.put()\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],\n debug=True)\n",
"step-4": "import webapp2\nimport jinja2\nimport os\nimport csv\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)))\n\n\nclass Contact(db.Expando):\n \"\"\" User data model \"\"\"\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\ncontact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',\n email='lim.ahseng@dhs.sg', handphone='', tickets_csjh='', tickets_edssh\n ='', remark='')\ncontact2.put()\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],\n debug=True)\n",
"step-5": "#!/usr/bin/env python\n\nimport webapp2 # web application framework\nimport jinja2 # template engine\nimport os \t # access file system\nimport csv\nfrom google.appengine.api import users\t# Google account authentication\nfrom google.appengine.ext import db\t\t# datastore\n\n# initialise template\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\nclass Contact(db.Expando): # allows for different number of fields\n\t''' User data model '''\n\tpid = db.StringProperty(required=True) # string = 500 char, allow field to be indexed, perform faster\n\tname = db.StringProperty(required=True)\n\tclass12 = db.StringProperty(required=True)\n\temail = db.EmailProperty(required=True)\n\thandphone = db.StringProperty(required=False)\n\ttickets_csjh = db.StringProperty(required=False)\n\ttickets_edssh = db.StringProperty(required=False)\n\tremark = db.TextProperty()\n\n\t\nclass MainHandler(webapp2.RequestHandler):\n\t''' Home page handler '''\n\tdef get(self):\n\t\t''' Show home page '''\n\t\t# import data\n\t\t# check if valid Google account\n#\t\tschool_register = csv.reader(open('data.csv'),delimiter=',')\n#\t\tfound = False\n\t\tuser = users.get_current_user()\n\t\n#\t\tfor student in school_register:\t# if valid logged in user\n#\t\t\tif student[0] == self.request.get('pid'):\n#\t\t\t\tcontact = student\n#\t\t\t\tfound = True\n#\t\t\t\tbreak\n\n\t\tif user: \n\t\t\t# logout link\n\t\t\turl = users.create_logout_url(self.request.uri)\n\t\t\t# logout text\n\t\t\turl_linktext = 'Logout'\n\t\t\t# retrieve user record from datastore\n\t\t\t# may get multiple records, so in order to get one record:\n\t\t\tquery = Contact.gql('WHERE pid = :1', user.nickname())\n\t\t\tresult = query.fetch(1)\n\t\t\tif result: #if user record found\n\t\t\t\tcontact = result[0]\n\t\t\t\tgreeting = (\"Welcome %s!\" % (contact.name,)) #1 item in couple = put comma\n\t\t\telse: #not found\n\t\t\t\tcontact = \"Invalid dhs.sg user\"\n\t\t\t\tgreeting = \"\"\n\t\t\t\n\t\telse: # not logged in \n\t\t\t\t# login link\n\t\t\turl = users.create_login_url(self.request.uri)\n\t\t\t\t# login text\n\t\t\turl_linktext = 'Login'\n\t\t\tcontact = \"Not authorised\"\n\t\t\tgreeting = \"You need to\"\n\t\t\t\n\t\ttemplate_values = {\n\t\t\t'contact': contact,\n\t\t\t'greeting': greeting,\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t}\n\t\t\n\t\t# create index.html template\n\t\ttemplate = jinja_environment.get_template('index.html')\n\t\t# associate template values with template\n\t\tself.response.out.write(template.render(template_values))\n\nclass Submit(webapp2.RequestHandler):\n\t''' Submit form '''\n\tdef post(self):\n\t\tif self.request.get('submit'):\n\t\t\tupdated_handphone = self.request.get('handphone')\n\t\t\tupdated_tickets_csjh = self.request.get('tickets_csjh')\n\t\t\tupdated_tickets_edssh = self.request.get('tickets_edssh')\n\t\t\tupdated_remark = self.request.get('remark')\n\t\t\turl = users.create_logout_url(self.request.uri)\n\t\t\turl_linktext = 'Logout'\n\t\t\tuser = users.get_current_user()\n\t\t\tquery = Contact.gql('WHERE pid = :1', user.nickname())\n\t\t\tresult = query.fetch(1)\n\t\t\t\n\t\t\tif result: \n\t\t\t\tcontact = result[0]\n\t\t\t\tgreeting = (\"User: %s\" % (contact.name,)) \n\t\t\t\tcontact.handphone = updated_handphone\n\t\t\t\tcontact.tickets_csjh = updated_tickets_csjh\n\t\t\t\tcontact.tickets_edssh = updated_tickets_edssh\n\t\t\t\tcontact.remark = db.Text(updated_remark)\n\t\t\t\tcontact.put()\n\t\t\telse: 
\t\n\t\t\t\tself.response.out.write('Reservation failed!')\n\t\n\t\t\n\t\ttemplate_values = {\n\t\t\t'contact': contact,\n\t\t\t'greeting': greeting,\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t\t'contact.handphone': updated_handphone,\n\t\t\t'contact.tickets_csjh': updated_tickets_csjh,\n\t\t\t'contact.tickets_edssh': updated_tickets_edssh,\n\t\t\t'contact.remark': updated_remark,\n\t\t}\n\t\t\n\t\ttemplate = jinja_environment.get_template('submit.html') \n\t\tself.response.out.write(template.render(template_values))\n\n# main\n\ncontact2 = Contact(pid = 'lim.ahseng', name = 'Lim Ah Seng', class12 = '5C99', email = 'lim.ahseng@dhs.sg', handphone = '', tickets_csjh = '', tickets_edssh = '', remark = '')\ncontact2.put()\n\t\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)], \n\t\t\t\t\t\t\t\tdebug=True)\n\n \n",
"step-ids": [
2,
8,
11,
12,
13
]
}
|
[
2,
8,
11,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('user', '0001_initial')]
operations = [migrations.CreateModel(name='history', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('uname', models.CharField(max_length=
50, verbose_name='用户名')), ('uword', models.CharField(max_length=50,
verbose_name='单词')), ('time', models.DateTimeField(auto_now=True,
verbose_name='查询时间')), ('isban', models.BooleanField(default=False,
verbose_name='禁用')), ('isdelete', models.BooleanField(default=False,
verbose_name='删除'))])]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('user', '0001_initial')]
operations = [migrations.CreateModel(name='history', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('uname', models.CharField(max_length=
50, verbose_name='用户名')), ('uword', models.CharField(max_length=50,
verbose_name='单词')), ('time', models.DateTimeField(auto_now=True,
verbose_name='查询时间')), ('isban', models.BooleanField(default=False,
verbose_name='禁用')), ('isdelete', models.BooleanField(default=False,
verbose_name='删除'))])]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-07-21 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='history',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uname', models.CharField(max_length=50, verbose_name='用户名')),
('uword', models.CharField(max_length=50, verbose_name='单词')),
('time', models.DateTimeField(auto_now=True, verbose_name='查询时间')),
('isban', models.BooleanField(default=False, verbose_name='禁用')),
('isdelete', models.BooleanField(default=False, verbose_name='删除')),
],
),
]
|
flexible
|
{
"blob_id": "722739086d2777085fdbfdbddef205aaf025580d",
"index": 4291,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user', '0001_initial')]\n operations = [migrations.CreateModel(name='history', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('uname', models.CharField(max_length=\n 50, verbose_name='用户名')), ('uword', models.CharField(max_length=50,\n verbose_name='单词')), ('time', models.DateTimeField(auto_now=True,\n verbose_name='查询时间')), ('isban', models.BooleanField(default=False,\n verbose_name='禁用')), ('isdelete', models.BooleanField(default=False,\n verbose_name='删除'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user', '0001_initial')]\n operations = [migrations.CreateModel(name='history', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('uname', models.CharField(max_length=\n 50, verbose_name='用户名')), ('uword', models.CharField(max_length=50,\n verbose_name='单词')), ('time', models.DateTimeField(auto_now=True,\n verbose_name='查询时间')), ('isban', models.BooleanField(default=False,\n verbose_name='禁用')), ('isdelete', models.BooleanField(default=False,\n verbose_name='删除'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.8 on 2018-07-21 12:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('user', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='history',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('uname', models.CharField(max_length=50, verbose_name='用户名')),\n ('uword', models.CharField(max_length=50, verbose_name='单词')),\n ('time', models.DateTimeField(auto_now=True, verbose_name='查询时间')),\n ('isban', models.BooleanField(default=False, verbose_name='禁用')),\n ('isdelete', models.BooleanField(default=False, verbose_name='删除')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def main():
print('Output')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
print('Output')
<|reserved_special_token_0|>
if __name__ == '__main__':
main()
<|reserved_special_token_0|>
print('Run time: {}'.format(end - start))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
print('Output')
start = time.time()
if __name__ == '__main__':
main()
end = time.time()
print('Run time: {}'.format(end - start))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import time
def main():
print('Output')
start = time.time()
if __name__ == '__main__':
main()
end = time.time()
print('Run time: {}'.format(end - start))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Project Euler - Problem XX
...
"""
# Imports
import time
# Global variables
# Lambda functions
# Functions
# Main functions
def main():
print('Output')
# Execute code
start = time.time()
if __name__ == "__main__":
main()
end = time.time()
print('Run time: {}'.format(end - start))
|
flexible
|
{
"blob_id": "cdb07241e08f8ac85a427c5b2bc3effca3917c85",
"index": 2188,
"step-1": "<mask token>\n\n\ndef main():\n print('Output')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n print('Output')\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n<mask token>\nprint('Run time: {}'.format(end - start))\n",
"step-3": "<mask token>\n\n\ndef main():\n print('Output')\n\n\nstart = time.time()\nif __name__ == '__main__':\n main()\nend = time.time()\nprint('Run time: {}'.format(end - start))\n",
"step-4": "<mask token>\nimport time\n\n\ndef main():\n print('Output')\n\n\nstart = time.time()\nif __name__ == '__main__':\n main()\nend = time.time()\nprint('Run time: {}'.format(end - start))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler - Problem XX\n...\n\"\"\"\n\n# Imports\nimport time\n\n# Global variables\n\n# Lamda functions\n\n# Functions\n\n# Main functions\ndef main():\n print('Output') \n\n# Execute code\nstart = time.time()\nif __name__ == \"__main__\":\n main()\nend = time.time()\nprint('Run time: {}'.format(end - start)) \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['average', 'extract_ocean_scalar', 'git', 'gmeantools', 'merge',
'netcdf', 'xrtools']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from . import average
from . import extract_ocean_scalar
from . import git
from . import gmeantools
from . import merge
from . import netcdf
from . import xrtools
__all__ = ['average', 'extract_ocean_scalar', 'git', 'gmeantools', 'merge',
'netcdf', 'xrtools']
<|reserved_special_token_1|>
"""Generic utilities module"""
from . import average
from . import extract_ocean_scalar
from . import git
from . import gmeantools
from . import merge
from . import netcdf
from . import xrtools
__all__ = [
"average",
"extract_ocean_scalar",
"git",
"gmeantools",
"merge",
"netcdf",
"xrtools",
]
|
flexible
|
{
"blob_id": "ab6450ee9038e0c58ca8becf6d2518d5e00b9c90",
"index": 9393,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['average', 'extract_ocean_scalar', 'git', 'gmeantools', 'merge',\n 'netcdf', 'xrtools']\n",
"step-3": "<mask token>\nfrom . import average\nfrom . import extract_ocean_scalar\nfrom . import git\nfrom . import gmeantools\nfrom . import merge\nfrom . import netcdf\nfrom . import xrtools\n__all__ = ['average', 'extract_ocean_scalar', 'git', 'gmeantools', 'merge',\n 'netcdf', 'xrtools']\n",
"step-4": "\"\"\"Generic utilities module\"\"\"\n\nfrom . import average\nfrom . import extract_ocean_scalar\nfrom . import git\nfrom . import gmeantools\nfrom . import merge\nfrom . import netcdf\nfrom . import xrtools\n\n__all__ = [\n \"average\",\n \"extract_ocean_scalar\",\n \"git\",\n \"gmeantools\",\n \"merge\",\n \"netcdf\",\n \"xrtools\",\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)
cv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)
cv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)
<|reserved_special_token_0|>
cv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.
LINE_AA)
cv2.imshow('Draw Image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
image = np.zeros((512, 512, 3), np.uint8)
cv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)
cv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)
cv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)
font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.
LINE_AA)
cv2.imshow('Draw Image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
image = np.zeros((512, 512, 3), np.uint8)
cv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)
cv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)
cv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)
font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.
LINE_AA)
cv2.imshow('Draw Image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
#!/usr/bin/env python
import numpy as np
import cv2
# Create a 512x512 RGB image
image = np.zeros((512, 512, 3), np.uint8)
# Pt Definition
# x0y0, x1y0, x2y0
# x0y1, x1y1, x2y1
# Draw a Line in the Middle of the image
# Start coordinate, end coordinate, white color, and line width
cv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)
cv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)
# Draw Rectange
cv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)
font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(image, "ROS OpenCV", (10, 500),
font, 2, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imshow("Draw Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "f6c5c2180a1a4b05b3f103c330b455e7387713a6",
"index": 8125,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\n<mask token>\ncv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.\n LINE_AA)\ncv2.imshow('Draw Image', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\nfont = cv2.FONT_HERSHEY_COMPLEX\ncv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.\n LINE_AA)\ncv2.imshow('Draw Image', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\nfont = cv2.FONT_HERSHEY_COMPLEX\ncv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.\n LINE_AA)\ncv2.imshow('Draw Image', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "#!/usr/bin/env python\n\nimport numpy as np\nimport cv2\n\n# Creat a Image with Pixel 512x512 RGB\nimage = np.zeros((512, 512, 3), np.uint8)\n\n\n# Pt Definition\n# x0y0, x1y0, x2 y0\n# x0y1 , x1y1, x2y1\n# Draw a Line in the Middle of the image\n# Start Co-ordinate end Co-ordinate While Color and Line Width\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\n\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\n\n# Draw Rectange\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\n\nfont = cv2.FONT_HERSHEY_COMPLEX\ncv2.putText(image, \"ROS OpenCV\", (10, 500),\n font, 2, (255, 0, 0), 2, cv2.LINE_AA)\n\ncv2.imshow(\"Draw Image\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from os import listdir
from os.path import isfile, join
from datetime import date
mypath = '/Users/kachunfung/python/codewars/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
py_removed = [i.replace('.py','') for i in onlyfiles]
file_counter_removed = py_removed.remove('file_counter')
day_removed = max([int(j.replace('day','')) for j in py_removed])
d0 = date(2016, 11, 7)
d1 = date.today()
delta = d1 - d0
if day_removed >= delta.days:
print "Well done!\nYou are %s days ahead.\nKeep up the good work! I am proud of you." % (day_removed - delta.days)
else:
print "You are %s days behind schedule.\nTry your best and Never give up!" % (delta.days - day_removed)
print "\nYou have completed %s codewars kata since 7th December 2016" % day_removed
|
normal
|
{
"blob_id": "592d5074eeca74a5845d26ee2ca6aba8c3d0f989",
"index": 8929,
"step-1": "from os import listdir\nfrom os.path import isfile, join\nfrom datetime import date\n\nmypath = '/Users/kachunfung/python/codewars/'\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\npy_removed = [i.replace('.py','') for i in onlyfiles]\nfile_counter_removed = py_removed.remove('file_counter')\nday_removed = max([int(j.replace('day','')) for j in py_removed])\n\nd0 = date(2016, 11, 7)\nd1 = date.today()\ndelta = d1 - d0\n\nif day_removed >= delta.days:\n print \"Well done!\\nYou are %s days ahead.\\nKeep up the good work! I am proud of you.\" % (day_removed - delta.days)\nelse:\n print \"You are %s days behind schedule.\\nTry your best and Never give up!\" % (delta.days - day_removed)\nprint \"\\nYou have completed %s codewars kata since 7th December 2016\" % day_removed\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Scrapper:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scrapper:
<|reserved_special_token_0|>
def scrapper(prov):
scrapper = importlib.import_module('scrappers.{}'.format(prov))
return scrapper.scrape()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scrapper:
def get_pos(str_lf, str_rg, text):
left = text.find(str_lf)
right = text.rfind(str_rg)
return left, right
def scrapper(prov):
scrapper = importlib.import_module('scrappers.{}'.format(prov))
return scrapper.scrape()
<|reserved_special_token_1|>
import importlib
class Scrapper:
def get_pos(str_lf, str_rg, text):
left = text.find(str_lf)
right = text.rfind(str_rg)
return left, right
def scrapper(prov):
scrapper = importlib.import_module('scrappers.{}'.format(prov))
return scrapper.scrape()
|
flexible
|
{
"blob_id": "67e06b6dddbd3f26295eaff921d1ad4a8b0e5487",
"index": 5580,
"step-1": "<mask token>\n\n\nclass Scrapper:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Scrapper:\n <mask token>\n\n def scrapper(prov):\n scrapper = importlib.import_module('scrappers.{}'.format(prov))\n return scrapper.scrape()\n",
"step-3": "<mask token>\n\n\nclass Scrapper:\n\n def get_pos(str_lf, str_rg, text):\n left = text.find(str_lf)\n right = text.rfind(str_rg)\n return left, right\n\n def scrapper(prov):\n scrapper = importlib.import_module('scrappers.{}'.format(prov))\n return scrapper.scrape()\n",
"step-4": "import importlib\n\n\nclass Scrapper:\n\n def get_pos(str_lf, str_rg, text):\n left = text.find(str_lf)\n right = text.rfind(str_rg)\n return left, right\n\n def scrapper(prov):\n scrapper = importlib.import_module('scrappers.{}'.format(prov))\n return scrapper.scrape()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO)
<|reserved_special_token_0|>
for extension in rdf_file_extension.keys():
files_to_check = '**/*' + extension
for filename in glob.iglob(root_path + files_to_check, recursive=True):
logging.info('Validating file ' + filename)
try:
g = utility.parseGraph(filename, rdf_file_extension[extension])
content = utility.readFile(filename)
declared_prefixes = utility.getDeclaredPrefixesRegex(content,
regex_prefix[extension], regex_url[extension],
regex_splitter[extension])
duplicated_prefixes = utility.findDuplicates(declared_prefixes)
if len(duplicated_prefixes) > 0:
msg = utility.getErrorMessage(duplicated_prefixes)
raise Exception('Duplicated prefix declaration: {}'.format(msg)
)
if extension == '.ttl':
content = re.sub('@prefix(.*?)\\n', '', content)
unused_prefixes = utility.getUnusedPrefixesRegex(
declared_prefixes, content)
elif extension == '.rdf':
used_prefixes = utility.getUsedPrefixesRDF(g)
unused_prefixes = utility.getUnusedPrefixesRDF(
declared_prefixes, used_prefixes)
if len(unused_prefixes) > 0:
msg = utility.getErrorMessage(unused_prefixes)
raise Exception('Unused prefixes:\n {}'.format(msg))
except Exception as e:
logging.error(e)
logging.error('Syntaxic error reading turtle file [' + filename +
']')
sys.exit(1)
print('Files syntaxic validation is successful')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO)
root_path = '../'
rdf_file_extension = {'.ttl': 'turtle', '.nt': 'nt', '.rdf':
'application/rdf+xml'}
regex_prefix = {'.ttl': '@prefix(.*?)\\n', '.rdf': 'xmlns:(.*?)\\n'}
regex_url = {'.ttl': '\\<(.*?)\\>', '.rdf': '\\"(.*?)\\"'}
regex_splitter = {'.ttl': ':', '.nt': 'nt', '.rdf': '='}
for extension in rdf_file_extension.keys():
files_to_check = '**/*' + extension
for filename in glob.iglob(root_path + files_to_check, recursive=True):
logging.info('Validating file ' + filename)
try:
g = utility.parseGraph(filename, rdf_file_extension[extension])
content = utility.readFile(filename)
declared_prefixes = utility.getDeclaredPrefixesRegex(content,
regex_prefix[extension], regex_url[extension],
regex_splitter[extension])
duplicated_prefixes = utility.findDuplicates(declared_prefixes)
if len(duplicated_prefixes) > 0:
msg = utility.getErrorMessage(duplicated_prefixes)
raise Exception('Duplicated prefix declaration: {}'.format(msg)
)
if extension == '.ttl':
content = re.sub('@prefix(.*?)\\n', '', content)
unused_prefixes = utility.getUnusedPrefixesRegex(
declared_prefixes, content)
elif extension == '.rdf':
used_prefixes = utility.getUsedPrefixesRDF(g)
unused_prefixes = utility.getUnusedPrefixesRDF(
declared_prefixes, used_prefixes)
if len(unused_prefixes) > 0:
msg = utility.getErrorMessage(unused_prefixes)
raise Exception('Unused prefixes:\n {}'.format(msg))
except Exception as e:
logging.error(e)
logging.error('Syntaxic error reading turtle file [' + filename +
']')
sys.exit(1)
print('Files syntaxic validation is successful')
<|reserved_special_token_1|>
import glob
import logging
import sys
import Utility as utility
import re
logging.basicConfig(level=logging.INFO)
root_path = '../'
rdf_file_extension = {'.ttl': 'turtle', '.nt': 'nt', '.rdf':
'application/rdf+xml'}
regex_prefix = {'.ttl': '@prefix(.*?)\\n', '.rdf': 'xmlns:(.*?)\\n'}
regex_url = {'.ttl': '\\<(.*?)\\>', '.rdf': '\\"(.*?)\\"'}
regex_splitter = {'.ttl': ':', '.nt': 'nt', '.rdf': '='}
for extension in rdf_file_extension.keys():
files_to_check = '**/*' + extension
for filename in glob.iglob(root_path + files_to_check, recursive=True):
logging.info('Validating file ' + filename)
try:
g = utility.parseGraph(filename, rdf_file_extension[extension])
content = utility.readFile(filename)
declared_prefixes = utility.getDeclaredPrefixesRegex(content,
regex_prefix[extension], regex_url[extension],
regex_splitter[extension])
duplicated_prefixes = utility.findDuplicates(declared_prefixes)
if len(duplicated_prefixes) > 0:
msg = utility.getErrorMessage(duplicated_prefixes)
raise Exception('Duplicated prefix declaration: {}'.format(msg)
)
if extension == '.ttl':
content = re.sub('@prefix(.*?)\\n', '', content)
unused_prefixes = utility.getUnusedPrefixesRegex(
declared_prefixes, content)
elif extension == '.rdf':
used_prefixes = utility.getUsedPrefixesRDF(g)
unused_prefixes = utility.getUnusedPrefixesRDF(
declared_prefixes, used_prefixes)
if len(unused_prefixes) > 0:
msg = utility.getErrorMessage(unused_prefixes)
raise Exception('Unused prefixes:\n {}'.format(msg))
except Exception as e:
logging.error(e)
logging.error('Syntaxic error reading turtle file [' + filename +
']')
sys.exit(1)
print('Files syntaxic validation is successful')
<|reserved_special_token_1|>
#Checks if all declared prefixes are used in the RDF File
import glob
import logging
import sys
import Utility as utility
import re
# set log level
logging.basicConfig(level=logging.INFO)
root_path = "../"
rdf_file_extension = {".ttl":"turtle", ".nt":"nt", ".rdf":"application/rdf+xml"}
regex_prefix = {".ttl": r'@prefix(.*?)\n', ".rdf": r'xmlns:(.*?)\n'}
regex_url = {".ttl": r'\<(.*?)\>', ".rdf": r'\"(.*?)\"'}
regex_splitter = {".ttl": ":", ".nt":"nt", ".rdf":"="}
for extension in rdf_file_extension.keys() :
files_to_check = "**/*" + extension
for filename in glob.iglob(root_path + files_to_check, recursive=True):
logging.info("Validating file " + filename)
try:
#Parse file using rdflib
g = utility.parseGraph(filename, rdf_file_extension[extension])
#Read File
content = utility.readFile(filename)
#Get Declared prefixes
declared_prefixes = utility.getDeclaredPrefixesRegex(content, regex_prefix[extension], regex_url[extension], regex_splitter[extension])
#Check redundant declaration
duplicated_prefixes = utility.findDuplicates(declared_prefixes)
#If redundant, raise exception
if len(duplicated_prefixes) > 0:
msg = utility.getErrorMessage(duplicated_prefixes)
raise Exception("Duplicated prefix declaration: {}".format(msg))
if(extension == '.ttl'):
#Remove prefixes from content
content = re.sub(r'@prefix(.*?)\n', '', content)
#Check for prefix usage
unused_prefixes = utility.getUnusedPrefixesRegex(declared_prefixes, content)
elif(extension == '.rdf'):
#Check for prefix usage
used_prefixes = utility.getUsedPrefixesRDF(g)
unused_prefixes = utility.getUnusedPrefixesRDF(declared_prefixes, used_prefixes)
#If there are unused prefixes, raise exception
if len(unused_prefixes) > 0:
msg = utility.getErrorMessage(unused_prefixes)
raise Exception("Unused prefixes:\n {}".format(msg))
except Exception as e:
logging.error(e)
logging.error("Syntaxic error reading turtle file [" +filename+"]")
sys.exit(1)
print("Files syntaxic validation is successful")
|
flexible
|
{
"blob_id": "fe406f40b48bf4982e7a48737b6b30514ae1fa71",
"index": 7915,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n<mask token>\nfor extension in rdf_file_extension.keys():\n files_to_check = '**/*' + extension\n for filename in glob.iglob(root_path + files_to_check, recursive=True):\n logging.info('Validating file ' + filename)\n try:\n g = utility.parseGraph(filename, rdf_file_extension[extension])\n content = utility.readFile(filename)\n declared_prefixes = utility.getDeclaredPrefixesRegex(content,\n regex_prefix[extension], regex_url[extension],\n regex_splitter[extension])\n duplicated_prefixes = utility.findDuplicates(declared_prefixes)\n if len(duplicated_prefixes) > 0:\n msg = utility.getErrorMessage(duplicated_prefixes)\n raise Exception('Duplicated prefix declaration: {}'.format(msg)\n )\n if extension == '.ttl':\n content = re.sub('@prefix(.*?)\\\\n', '', content)\n unused_prefixes = utility.getUnusedPrefixesRegex(\n declared_prefixes, content)\n elif extension == '.rdf':\n used_prefixes = utility.getUsedPrefixesRDF(g)\n unused_prefixes = utility.getUnusedPrefixesRDF(\n declared_prefixes, used_prefixes)\n if len(unused_prefixes) > 0:\n msg = utility.getErrorMessage(unused_prefixes)\n raise Exception('Unused prefixes:\\n {}'.format(msg))\n except Exception as e:\n logging.error(e)\n logging.error('Syntaxic error reading turtle file [' + filename +\n ']')\n sys.exit(1)\nprint('Files syntaxic validation is successful')\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO)\nroot_path = '../'\nrdf_file_extension = {'.ttl': 'turtle', '.nt': 'nt', '.rdf':\n 'application/rdf+xml'}\nregex_prefix = {'.ttl': '@prefix(.*?)\\\\n', '.rdf': 'xmlns:(.*?)\\\\n'}\nregex_url = {'.ttl': '\\\\<(.*?)\\\\>', '.rdf': '\\\\\"(.*?)\\\\\"'}\nregex_splitter = {'.ttl': ':', '.nt': 'nt', '.rdf': '='}\nfor extension in rdf_file_extension.keys():\n files_to_check = '**/*' + extension\n for filename in glob.iglob(root_path + files_to_check, recursive=True):\n logging.info('Validating file ' + filename)\n try:\n g = utility.parseGraph(filename, rdf_file_extension[extension])\n content = utility.readFile(filename)\n declared_prefixes = utility.getDeclaredPrefixesRegex(content,\n regex_prefix[extension], regex_url[extension],\n regex_splitter[extension])\n duplicated_prefixes = utility.findDuplicates(declared_prefixes)\n if len(duplicated_prefixes) > 0:\n msg = utility.getErrorMessage(duplicated_prefixes)\n raise Exception('Duplicated prefix declaration: {}'.format(msg)\n )\n if extension == '.ttl':\n content = re.sub('@prefix(.*?)\\\\n', '', content)\n unused_prefixes = utility.getUnusedPrefixesRegex(\n declared_prefixes, content)\n elif extension == '.rdf':\n used_prefixes = utility.getUsedPrefixesRDF(g)\n unused_prefixes = utility.getUnusedPrefixesRDF(\n declared_prefixes, used_prefixes)\n if len(unused_prefixes) > 0:\n msg = utility.getErrorMessage(unused_prefixes)\n raise Exception('Unused prefixes:\\n {}'.format(msg))\n except Exception as e:\n logging.error(e)\n logging.error('Syntaxic error reading turtle file [' + filename +\n ']')\n sys.exit(1)\nprint('Files syntaxic validation is successful')\n",
"step-4": "import glob\nimport logging\nimport sys\nimport Utility as utility\nimport re\nlogging.basicConfig(level=logging.INFO)\nroot_path = '../'\nrdf_file_extension = {'.ttl': 'turtle', '.nt': 'nt', '.rdf':\n 'application/rdf+xml'}\nregex_prefix = {'.ttl': '@prefix(.*?)\\\\n', '.rdf': 'xmlns:(.*?)\\\\n'}\nregex_url = {'.ttl': '\\\\<(.*?)\\\\>', '.rdf': '\\\\\"(.*?)\\\\\"'}\nregex_splitter = {'.ttl': ':', '.nt': 'nt', '.rdf': '='}\nfor extension in rdf_file_extension.keys():\n files_to_check = '**/*' + extension\n for filename in glob.iglob(root_path + files_to_check, recursive=True):\n logging.info('Validating file ' + filename)\n try:\n g = utility.parseGraph(filename, rdf_file_extension[extension])\n content = utility.readFile(filename)\n declared_prefixes = utility.getDeclaredPrefixesRegex(content,\n regex_prefix[extension], regex_url[extension],\n regex_splitter[extension])\n duplicated_prefixes = utility.findDuplicates(declared_prefixes)\n if len(duplicated_prefixes) > 0:\n msg = utility.getErrorMessage(duplicated_prefixes)\n raise Exception('Duplicated prefix declaration: {}'.format(msg)\n )\n if extension == '.ttl':\n content = re.sub('@prefix(.*?)\\\\n', '', content)\n unused_prefixes = utility.getUnusedPrefixesRegex(\n declared_prefixes, content)\n elif extension == '.rdf':\n used_prefixes = utility.getUsedPrefixesRDF(g)\n unused_prefixes = utility.getUnusedPrefixesRDF(\n declared_prefixes, used_prefixes)\n if len(unused_prefixes) > 0:\n msg = utility.getErrorMessage(unused_prefixes)\n raise Exception('Unused prefixes:\\n {}'.format(msg))\n except Exception as e:\n logging.error(e)\n logging.error('Syntaxic error reading turtle file [' + filename +\n ']')\n sys.exit(1)\nprint('Files syntaxic validation is successful')\n",
"step-5": "#Checks if all declared prefixes are used in the RDF File\n\nimport glob\nimport logging\nimport sys\nimport Utility as utility\nimport re\n\n# set log level\nlogging.basicConfig(level=logging.INFO)\n\nroot_path = \"../\"\n\nrdf_file_extension = {\".ttl\":\"turtle\", \".nt\":\"nt\", \".rdf\":\"application/rdf+xml\"}\nregex_prefix = {\".ttl\": r'@prefix(.*?)\\n', \".rdf\": r'xmlns:(.*?)\\n'}\nregex_url = {\".ttl\": r'\\<(.*?)\\>', \".rdf\": r'\\\"(.*?)\\\"'}\nregex_splitter = {\".ttl\": \":\", \".nt\":\"nt\", \".rdf\":\"=\"}\n\nfor extension in rdf_file_extension.keys() :\n\tfiles_to_check = \"**/*\" + extension\n\t\t\n\tfor filename in glob.iglob(root_path + files_to_check, recursive=True):\n\t\tlogging.info(\"Validating file \" + filename)\n\n\t\ttry:\n\t\t\t#Parse file using rdflib\n\t\t\tg = utility.parseGraph(filename, rdf_file_extension[extension])\n\n\t\t\t#Read File\n\t\t\tcontent = utility.readFile(filename)\n\n\t\t\t#Get Declared prefixes\n\t\t\tdeclared_prefixes = utility.getDeclaredPrefixesRegex(content, regex_prefix[extension], regex_url[extension], regex_splitter[extension])\n\n\t\t\t#Check redundant declaration\n\t\t\tduplicated_prefixes = utility.findDuplicates(declared_prefixes)\n\t\t\t\n\t\t\t#If redundant, raise exception\n\t\t\tif len(duplicated_prefixes) > 0:\n\t\t\t\tmsg = utility.getErrorMessage(duplicated_prefixes)\n\t\t\t\traise Exception(\"Duplicated prefix declaration: {}\".format(msg))\n\n\t\t\tif(extension == '.ttl'):\n\t\t\t\t#Remove prefixes from content\n\t\t\t\tcontent = re.sub(r'@prefix(.*?)\\n', '', content)\n\n\t\t\t\t#Check for prefix usage\n\t\t\t\tunused_prefixes = utility.getUnusedPrefixesRegex(declared_prefixes, content)\n\n\t\t\telif(extension == '.rdf'):\n\t\t\t\t#Check for prefix usage\n\t\t\t\tused_prefixes = utility.getUsedPrefixesRDF(g)\n\t\t\t\tunused_prefixes = utility.getUnusedPrefixesRDF(declared_prefixes, used_prefixes)\n\n\t\t\t#If there are unused prefixes, raise exception\n\t\t\tif len(unused_prefixes) > 0:\n\t\t\t\tmsg = utility.getErrorMessage(unused_prefixes)\n\t\t\t\traise Exception(\"Unused prefixes:\\n {}\".format(msg))\n\n\t\texcept Exception as e:\n\t\t\t\tlogging.error(e)\n\t\t\t\tlogging.error(\"Syntaxic error reading turtle file [\" +filename+\"]\")\n\t\t\t\tsys.exit(1)\n\nprint(\"Files syntaxic validation is successful\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .compat import reverse, action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework import pagination
from rest_framework import renderers
from . import registry
from .serializers import RunSerializer, RecordSerializer
from .models import Run
from .settings import import_setting
class PageNumberPagination(pagination.PageNumberPagination):
page_size = 50
class RunViewSet(ModelViewSet):
serializer_class = RunSerializer
pagination_class = PageNumberPagination
renderer_classes = [
renderers.TemplateHTMLRenderer,
renderers.JSONRenderer,
renderers.BrowsableAPIRenderer,
]
authentication_classes = [
import_setting('AUTHENTICATION'),
]
permission_classes = [
import_setting('PERMISSION'),
]
record_serializer_class = RecordSerializer
queryset = Run.objects.all()
@property
def backend(self):
from . import backend as data_wizard_backend
return data_wizard_backend
@property
def template_name(self):
if self.action == 'retrieve':
template = 'detail'
else:
template = self.action
return 'data_wizard/run_{}.html'.format(template)
def get_renderers(self):
if self.action == 'status':
return [renderers.JSONRenderer()]
else:
return super(RunViewSet, self).get_renderers()
@action(detail=True)
def status(self, request, *args, **kwargs):
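        # Poll the asynchronous backend for the task's state; when the task
        # reports a follow-up action (or finishes), include the URL of the
        # corresponding wizard step in the response.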
task_id = request.GET.get('task', None)
result = self.backend.get_async_status(task_id)
status = result.get('status', 'UNKNOWN')
action = result.get('action', None)
if not action and status == 'SUCCESS':
action = 'records'
if action:
result['location'] = self.get_action_url(action)
elif status == 'FAILURE' and not result.get('error'):
result['error'] = "Unknown Error"
result['status'] = status
return Response(result)
_namespace = 'data_wizard'
def get_action_url(self, action):
name = self._namespace + ':run-' + action
return reverse(name, kwargs={'pk': self.get_object().pk})
def run_task(self, name, use_async=False, post=None):
run = self.get_object()
return run.run_task(
name,
use_async=use_async,
post=post,
backend=self.backend,
user=self.request.user
)
def retrieve_and_run(self, task_name, use_async=False, post=None):
response = self.retrieve(self.request, **self.kwargs)
result = self.run_task(task_name, use_async, post)
response.data.update(result)
return response
@action(detail=True)
def serializers(self, request, *args, **kwargs):
response = self.retrieve(request, **self.kwargs)
response.data['serializer_choices'] = [
{
'name': s['class_name'],
'label': s['name'],
} for s in registry.get_serializers()
if s['options'].get('show_in_list', True)
]
return response
@action(detail=True, methods=['post'])
def updateserializer(self, request, *args, **kwargs):
run = self.get_object()
self.action = 'serializers'
name = request.POST.get('serializer', None)
if name and registry.get_serializer(name):
run.serializer = name
run.save()
run.add_event('update_serializer')
return self.serializers(request)
@action(detail=True)
def columns(self, request, *args, **kwargs):
return self.retrieve_and_run('read_columns')
@action(detail=True, methods=['post'])
def updatecolumns(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_columns')
self.action = 'columns'
result = self.run_task('update_columns', post=request.POST)
response.data.update(result)
return response
@action(detail=True)
def ids(self, request, *args, **kwargs):
return self.retrieve_and_run('read_row_identifiers')
@action(detail=True, methods=['post'])
def updateids(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_row_identifiers')
self.action = 'ids'
result = self.run_task('update_row_identifiers', post=request.POST)
response.data.update(result)
return response
@action(detail=True, methods=['post'])
def data(self, request, *args, **kwargs):
return self.retrieve_and_run('import_data', use_async=True)
@action(detail=True, methods=['post', 'get'])
def auto(self, request, *args, **kwargs):
if request.method == 'GET':
response = self.retrieve(request, **kwargs)
task_id = request.GET.get('task', None)
if task_id:
response.data['task_id'] = task_id
else:
self.action = 'retrieve'
return response
return self.retrieve_and_run('auto_import', use_async=True)
@action(detail=True)
def records(self, request, *args, **kwargs):
response = self.retrieve(self.request, **kwargs)
response.data['records'] = self.record_serializer_class(
self.get_object().record_set.all(),
many=True
).data
return response
|
normal
|
{
"blob_id": "11a0c3307994a90d1d4de67d442ffa355e11e13b",
"index": 6836,
"step-1": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n <mask token>\n <mask token>\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-2": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n <mask token>\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = 'Unknown Error'\n result['status'] = status\n return Response(result)\n <mask token>\n <mask token>\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n <mask token>\n <mask token>\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-3": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n\n def get_renderers(self):\n if self.action == 'status':\n return [renderers.JSONRenderer()]\n else:\n return super(RunViewSet, self).get_renderers()\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = 'Unknown Error'\n result['status'] = status\n return Response(result)\n <mask token>\n\n def get_action_url(self, action):\n name = self._namespace + ':run-' + action\n return reverse(name, kwargs={'pk': self.get_object().pk})\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n\n @action(detail=True, methods=['post'])\n def updatecolumns(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_columns')\n self.action = 'columns'\n result = self.run_task('update_columns', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def ids(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_row_identifiers')\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', 
None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-4": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n serializer_class = RunSerializer\n pagination_class = PageNumberPagination\n renderer_classes = [renderers.TemplateHTMLRenderer, renderers.\n JSONRenderer, renderers.BrowsableAPIRenderer]\n authentication_classes = [import_setting('AUTHENTICATION')]\n permission_classes = [import_setting('PERMISSION')]\n record_serializer_class = RecordSerializer\n queryset = Run.objects.all()\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n\n def get_renderers(self):\n if self.action == 'status':\n return [renderers.JSONRenderer()]\n else:\n return super(RunViewSet, self).get_renderers()\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = 'Unknown Error'\n result['status'] = status\n return Response(result)\n _namespace = 'data_wizard'\n\n def get_action_url(self, action):\n name = self._namespace + ':run-' + action\n return reverse(name, kwargs={'pk': self.get_object().pk})\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n\n @action(detail=True, methods=['post'])\n def updatecolumns(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_columns')\n self.action = 'columns'\n result = self.run_task('update_columns', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def ids(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_row_identifiers')\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, 
*args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-5": "from .compat import reverse, action\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework import pagination\nfrom rest_framework import renderers\nfrom . import registry\nfrom .serializers import RunSerializer, RecordSerializer\nfrom .models import Run\nfrom .settings import import_setting\n\n\nclass PageNumberPagination(pagination.PageNumberPagination):\n page_size = 50\n\n\nclass RunViewSet(ModelViewSet):\n serializer_class = RunSerializer\n pagination_class = PageNumberPagination\n renderer_classes = [\n renderers.TemplateHTMLRenderer,\n renderers.JSONRenderer,\n renderers.BrowsableAPIRenderer,\n ]\n authentication_classes = [\n import_setting('AUTHENTICATION'),\n ]\n permission_classes = [\n import_setting('PERMISSION'),\n ]\n record_serializer_class = RecordSerializer\n queryset = Run.objects.all()\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n\n def get_renderers(self):\n if self.action == 'status':\n return [renderers.JSONRenderer()]\n else:\n return super(RunViewSet, self).get_renderers()\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = \"Unknown Error\"\n result['status'] = status\n return Response(result)\n\n _namespace = 'data_wizard'\n\n def get_action_url(self, action):\n name = self._namespace + ':run-' + action\n return reverse(name, kwargs={'pk': self.get_object().pk})\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(\n name,\n use_async=use_async,\n post=post,\n backend=self.backend,\n user=self.request.user\n )\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [\n {\n 'name': s['class_name'],\n 'label': s['name'],\n } for s in registry.get_serializers()\n if s['options'].get('show_in_list', True)\n ]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n\n @action(detail=True, methods=['post'])\n def updatecolumns(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_columns')\n self.action = 'columns'\n result = self.run_task('update_columns', post=request.POST)\n response.data.update(result)\n return response\n\n 
@action(detail=True)\n def ids(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_row_identifiers')\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(\n self.get_object().record_set.all(),\n many=True\n ).data\n return response\n",
"step-ids": [
11,
13,
17,
18,
22
]
}
|
[
11,
13,
17,
18,
22
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
def cal_sum(self, root, L, R, result):
if not root:
return result
left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)
right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)
if root.val < L or root.val > R:
return left + right
return left + right + root.val
<|reserved_special_token_1|>
class Solution:
def rangeSumBST(self, root: TreeNode, L: int, R: int) ->int:
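        # Recursively accumulate values of nodes whose value lies in [L, R],
        # pruning subtrees that cannot contain in-range values.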
result = self.cal_sum(root, L, R, 0)
return result
def cal_sum(self, root, L, R, result):
if not root:
return result
left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)
right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)
if root.val < L or root.val > R:
return left + right
return left + right + root.val
<|reserved_special_token_1|>
# 938. Range Sum of BST
# Share
# Given the root node of a binary search tree, return the sum of values of all nodes with value between L and R (inclusive).
# The binary search tree is guaranteed to have unique values.
# Example 1:
# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15
# Output: 32
# Example 2:
# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10
# Output: 23
# Note:
# The number of nodes in the tree is at most 10000.
# The final answer is guaranteed to be less than 2^31.
# class Solution:
# def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
# result = self.cal_sum(root, L, R, 0)
# return result
# def cal_sum(self, root, L, R, result):
# if not root:
# return result
# left = self.cal_sum(root.left, L, R, result)
# right = self.cal_sum(root.right, L, R, result)
# if root.val < L or root.val > R:
# return left + right
# return left + right + root.val
# Better Solution
class Solution:
def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
result = self.cal_sum(root, L, R, 0)
return result
def cal_sum(self, root, L, R, result):
if not root:
return result
left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)
right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)
if root.val < L or root.val > R:
return left + right
return left + right + root.val
|
flexible
|
{
"blob_id": "8e1de62f2490d2276a834ae1ab0f1958649fa821",
"index": 5503,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n <mask token>\n",
"step-3": "class Solution:\n <mask token>\n\n def cal_sum(self, root, L, R, result):\n if not root:\n return result\n left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)\n right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)\n if root.val < L or root.val > R:\n return left + right\n return left + right + root.val\n",
"step-4": "class Solution:\n\n def rangeSumBST(self, root: TreeNode, L: int, R: int) ->int:\n result = self.cal_sum(root, L, R, 0)\n return result\n\n def cal_sum(self, root, L, R, result):\n if not root:\n return result\n left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)\n right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)\n if root.val < L or root.val > R:\n return left + right\n return left + right + root.val\n",
"step-5": "# 938. Range Sum of BST\n\n\n# Share\n# Given the root node of a binary search tree, return the sum of values of all nodes with value between L and R (inclusive).\n\n# The binary search tree is guaranteed to have unique values.\n\n \n\n# Example 1:\n\n# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15\n# Output: 32\n# Example 2:\n\n# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10\n# Output: 23\n \n\n# Note:\n\n# The number of nodes in the tree is at most 10000.\n# The final answer is guaranteed to be less than 2^31.\n\n# class Solution:\n# def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:\n \n# result = self.cal_sum(root, L, R, 0)\n \n# return result\n \n# def cal_sum(self, root, L, R, result):\n \n# if not root:\n# return result\n \n# left = self.cal_sum(root.left, L, R, result)\n# right = self.cal_sum(root.right, L, R, result)\n \n# if root.val < L or root.val > R:\n# return left + right\n \n# return left + right + root.val\n\n\n# Better Solution\nclass Solution:\n def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:\n \n result = self.cal_sum(root, L, R, 0)\n \n return result\n \n def cal_sum(self, root, L, R, result):\n \n if not root:\n return result\n \n \n left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)\n \n right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)\n \n if root.val < L or root.val > R:\n return left + right\n \n return left + right + root.val",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
from domain import *
from fuzzy_set import *
from parser import *
class FuzzyControler(object):
def __init__(self, angle_rules, acc_rules, domains_angle, domains_acc):
self.angle_rules = angle_rules
self.acc_rules = acc_rules
self.domains_angle = domains_angle
self.domains_acc = domains_acc
self.intervals = []
self.intervals.append(0)
i = 1
while i <= 2048:
self.intervals.append(i)
i *= 2
def calculateNewAccAndAngle(self, L, D, LK, DK, V, S):
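		# Quantize the raw sensor readings onto power-of-two intervals, then
		# evaluate the steering-angle and acceleration rule bases separately.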
left = (L, LK, S)
right = (D, DK, S)
left = self.transformToInterval(left)
right = self.transformToInterval(right)
left_acc = (left[0], right[0], V)
right_acc = (left[1], right[1], V)
angle = self.calcAngle(left, right, self.angle_rules, self.domains_angle)
acc = self.calcAcc(left_acc, right_acc, self.acc_rules, self.domains_acc)
sys.stderr.write(str(angle) + " " + str(acc) + "\n")
return acc, angle
def calcAngle(self, left, right, angle_rules, domains):
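		# Mamdani-style inference: for every candidate angle y, aggregate the
		# rule activations with max, then defuzzify with center of area.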
# print domains
angle_domain = domains["angle_domain"]
cardinality = angle_domain.getCardinality()
new_memberships = []
domain_elements = angle_domain.getElements()
# print "here"
for i in range(cardinality):
#
y = domain_elements[i]
left_elem = left + (y,)
right_elem = right + (y,)
# sys.stderr.write(str(left_elem) + "\n")
# sys.stderr.write(str(right_elem) + "\n")
# print right_elem
max_ = 0
for rule in angle_rules:
# print "here"
# print rule.name
if rule.name.startswith("RULE_LEFT"):
# print "here"
min_ = rule.getMembershipFor(left_elem)
else:
min_ = rule.getMembershipFor(right_elem)
# print min_
if min_ > max_:
max_ = min_
new_memberships.append(max_)
# print self.centerOfArea(new_memberships, domain_elements)
result = int(self.centerOfArea(new_memberships, domain_elements))
return result
def calcAcc(self, left, right, acc_rules, domains):
# print domains
acc_domain = domains["acc_domain"]
cardinality = acc_domain.getCardinality()
new_memberships = []
domain_elements = acc_domain.getElements()
for i in range(cardinality):
#
y = domain_elements[i]
left_elem = left + (y,)
right_elem = right + (y,)
max_ = 0
for rule in acc_rules:
# print "here"
# print rule.name
if rule.name.startswith("RULE_LD"):
# print "here"
min_ = rule.getMembershipFor(left_elem)
else:
sys.stderr.write(str(right_elem) + "\n")
min_ = rule.getMembershipFor(right_elem)
# print min_
if min_ > max_:
max_ = min_
new_memberships.append(max_)
# print self.centerOfArea(new_memberships, domain_elements)
result = int(self.centerOfArea(new_memberships, domain_elements))
return result
def centerOfArea(self, memberships, elements):
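		# Center-of-area defuzzification: sum(mu_i * x_i) / sum(mu_i),
		# returning 0 when no rule fired at all.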
# print memberships, elements
# print len(memberships)
result = 0
numerator = 0
denominator = 0
for i in range(len(memberships)):
numerator += memberships[i] * elements[i]
denominator += memberships[i]
if denominator == 0:
return 0
result = float(numerator) / denominator
return result
def transformToInterval(self, elem):
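		# Snap each value down to the lower bound of its power-of-two
		# interval [0, 1, 2, 4, ..., 2048]; values >= 2048 pass through.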
val = list(elem)
for i in range(len(elem)):
for j in range(1, len(self.intervals)):
if elem[i] < self.intervals[j] and elem[i] >= self.intervals[j-1]:
val[i] = self.intervals[j-1]
return tuple(val)
def main():
domains_angle = {}
domains_acc = {}
sets_angle = {}
sets_acc = {}
operators = {}
operators["+"] = ("ZadehS",)
operators["*"] = ("ZadehT",)
operators["!"] = ("ZadehNot",)
operators["->"] = ("'max-min'",)
parser = Parser(sys.argv[1], domains_angle, sets_angle, operators)
parser.parse()
sets_angle = parser.rules
parser = Parser(sys.argv[2], domains_acc, sets_acc, operators)
parser.parse()
sets_acc = parser.rules
controler = FuzzyControler(sets_angle, sets_acc, domains_angle, domains_acc)
while True:
# print "here"
line = sys.stdin.readline()
if line == "KRAJ\n":
break
L,D,LK,DK,V,S = [int(s) for s in line.split() if s.isdigit()]
akcel, kormilo = controler.calculateNewAccAndAngle(L, D, LK, DK, V, S)
print akcel, kormilo
sys.stdout.flush()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "7451b09c54734fb02167d43b96df972420d86853",
"index": 7776,
"step-1": "import sys\n\nfrom domain import *\nfrom fuzzy_set import *\nfrom parser import *\n\nclass FuzzyControler(object):\n\n\tdef __init__(self, angle_rules, acc_rules, domains_angle, domains_acc):\n\n\t\tself.angle_rules = angle_rules\n\t\tself.acc_rules = acc_rules\n\t\tself.domains_angle = domains_angle\n\t\tself.domains_acc = domains_acc\n\n\t\tself.intervals = []\n\t\tself.intervals.append(0)\n\n\t\ti = 1\n\t\twhile i <= 2048:\n\t\t\tself.intervals.append(i)\n\t\t\ti *= 2\n\n\tdef calculateNewAccAndAngle(self, L, D, LK, DK, V, S):\n\n\t\tleft = (L, LK, S)\n\t\tright = (D, DK, S)\n\n\t\tleft = self.transformToInterval(left)\n\t\tright = self.transformToInterval(right)\n\n\t\tleft_acc = (left[0], right[0], V)\n\t\tright_acc = (left[1], right[1], V)\n\n\t\tangle = self.calcAngle(left, right, self.angle_rules, self.domains_angle)\n\t\tacc = self.calcAcc(left_acc, right_acc, self.acc_rules, self.domains_acc)\n\t\tsys.stderr.write(str(angle) + \" \" + str(acc) + \"\\n\")\n\t\treturn acc, angle\n\n\tdef calcAngle(self, left, right, angle_rules, domains):\n\n\t\t# print domains\n\t\tangle_domain = domains[\"angle_domain\"]\n\t\tcardinality = angle_domain.getCardinality()\n\t\tnew_memberships = []\n\t\tdomain_elements = angle_domain.getElements()\n\n\t\t# print \"here\"\n\n\t\tfor i in range(cardinality):\n# \n\t\t\ty = domain_elements[i]\n\n\t\t\tleft_elem = left + (y,)\n\t\t\tright_elem = right + (y,)\n\n\t\t\t# sys.stderr.write(str(left_elem) + \"\\n\")\n\t\t\t# sys.stderr.write(str(right_elem) + \"\\n\")\n\t\t\t# print right_elem\n\n\t\t\tmax_ = 0\n\n\t\t\tfor rule in angle_rules:\n\t\t\t\t# print \"here\"\n\t\t\t\t# print rule.name\n\t\t\t\tif rule.name.startswith(\"RULE_LEFT\"):\n\t\t\t\t\t# print \"here\"\n\t\t\t\t\tmin_ = rule.getMembershipFor(left_elem)\n\t\t\t\telse:\n\t\t\t\t\tmin_ = rule.getMembershipFor(right_elem)\n\n\t\t\t\t# print min_\n\t\t\t\tif min_ > max_:\n\t\t\t\t\tmax_ = min_\n\n\t\t\tnew_memberships.append(max_)\n\n\t\t# print self.centerOfArea(new_memberships, domain_elements)\n\t\tresult = int(self.centerOfArea(new_memberships, domain_elements))\n\t\treturn result\n\n\tdef calcAcc(self, left, right, acc_rules, domains):\n\n\t\t# print domains\n\t\tacc_domain = domains[\"acc_domain\"]\n\t\tcardinality = acc_domain.getCardinality()\n\t\tnew_memberships = []\n\t\tdomain_elements = acc_domain.getElements()\n\n\t\tfor i in range(cardinality):\n# \n\t\t\ty = domain_elements[i]\n\n\t\t\tleft_elem = left + (y,)\n\t\t\tright_elem = right + (y,)\n\n\t\t\tmax_ = 0\n\n\t\t\tfor rule in acc_rules:\n\t\t\t\t# print \"here\"\n\t\t\t\t# print rule.name\n\t\t\t\tif rule.name.startswith(\"RULE_LD\"):\n\t\t\t\t\t# print \"here\"\n\t\t\t\t\tmin_ = rule.getMembershipFor(left_elem)\n\t\t\t\telse:\n\t\t\t\t\tsys.stderr.write(str(right_elem) + \"\\n\")\n\t\t\t\t\tmin_ = rule.getMembershipFor(right_elem)\n\n\t\t\t\t# print min_\n\t\t\t\tif min_ > max_:\n\t\t\t\t\tmax_ = min_\n\n\t\t\tnew_memberships.append(max_)\n\n\t\t# print self.centerOfArea(new_memberships, domain_elements)\n\t\tresult = int(self.centerOfArea(new_memberships, domain_elements))\n\t\treturn result\n\n\tdef centerOfArea(self, memberships, elements):\n\n\t\t# print memberships, elements\n\t\t# print len(memberships)\n\t\tresult = 0\n\t\tnumerator = 0\n\t\tdenominator = 0\n\n\t\tfor i in range(len(memberships)):\n\n\t\t\tnumerator += memberships[i] * elements[i]\n\t\t\tdenominator += memberships[i]\n\n\t\tif denominator == 0:\n\t\t\treturn 0\n\t\tresult = float(numerator) / denominator\n\t\treturn result\n\n\tdef 
transformToInterval(self, elem):\n\n\t\tval = list(elem)\n\t\tfor i in range(len(elem)):\n\t\t\tfor j in range(1, len(self.intervals)):\n\n\t\t\t\tif elem[i] < self.intervals[j] and elem[i] >= self.intervals[j-1]:\n\t\t\t\t\tval[i] = self.intervals[j-1]\n\n\t\treturn tuple(val)\n\ndef main():\n\n\tdomains_angle = {}\n\tdomains_acc = {}\n\tsets_angle = {}\n\tsets_acc = {}\n\toperators = {}\n\toperators[\"+\"] = (\"ZadehS\",)\n\toperators[\"*\"] = (\"ZadehT\",)\n\toperators[\"!\"] = (\"ZadehNot\",)\n\toperators[\"->\"] = (\"'max-min'\",)\n\n\tparser = Parser(sys.argv[1], domains_angle, sets_angle, operators)\n\tparser.parse()\n\tsets_angle = parser.rules\n\n\tparser = Parser(sys.argv[2], domains_acc, sets_acc, operators)\n\tparser.parse()\n\tsets_acc = parser.rules\n\n\tcontroler = FuzzyControler(sets_angle, sets_acc, domains_angle, domains_acc)\n\n\twhile True:\n\t \t\n\t \t# print \"here\"\n\t\tline = sys.stdin.readline()\n\n\t\tif line == \"KRAJ\\n\":\n\t \t\tbreak\n\n\t\tL,D,LK,DK,V,S = [int(s) for s in line.split() if s.isdigit()]\n\t\takcel, kormilo = controler.calculateNewAccAndAngle(L, D, LK, DK, V, S)\n\n\t\tprint akcel, kormilo\n\t\tsys.stdout.flush()\n\nif __name__ == \"__main__\":\n\tmain()\n\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Sala:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Sala:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return str(self.numero)
<|reserved_special_token_1|>
class Sala:
<|reserved_special_token_0|>
def add_turma(self, turma):
self.Turmas.append(turma)
def __str__(self):
return str(self.numero)
<|reserved_special_token_1|>
class Sala:
def __init__(self, sala):
self.Turmas = []
self.numero = sala
def add_turma(self, turma):
self.Turmas.append(turma)
def __str__(self):
return str(self.numero)
<|reserved_special_token_1|>
class Sala:
def __init__(self, sala):
self.Turmas = []
self.numero = sala
def add_turma(self, turma):
# do things
self.Turmas.append(turma)
def __str__(self):
return str(self.numero)
|
flexible
|
{
"blob_id": "e41df44db92e2ef7f9c20a0f3052e1c8c28b76c7",
"index": 6174,
"step-1": "class Sala:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Sala:\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.numero)\n",
"step-3": "class Sala:\n <mask token>\n\n def add_turma(self, turma):\n self.Turmas.append(turma)\n\n def __str__(self):\n return str(self.numero)\n",
"step-4": "class Sala:\n\n def __init__(self, sala):\n self.Turmas = []\n self.numero = sala\n\n def add_turma(self, turma):\n self.Turmas.append(turma)\n\n def __str__(self):\n return str(self.numero)\n",
"step-5": "class Sala:\n def __init__(self, sala):\n self.Turmas = []\n self.numero = sala\n\n def add_turma(self, turma):\n # do things\n self.Turmas.append(turma)\n\n def __str__(self):\n return str(self.numero)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Sat Dec 17 14:41:56 2011 +0100
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
"""Run tests on the libsvm machine infrastructure.
"""
import os
import numpy
import tempfile
import pkg_resources
import nose.tools
import bob.io.base
from . import File, Machine
def F(f):
"""Returns the test file on the "data" subdirectory"""
return pkg_resources.resource_filename(__name__, os.path.join('data', f))
def tempname(suffix, prefix='bobtest_machine_'):
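  """Reserves a unique temporary file name: mkstemp creates the file, which is immediately closed and unlinked so only the unused name remains."""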
(fd, name) = tempfile.mkstemp(suffix, prefix)
os.close(fd)
os.unlink(name)
return name
TEST_MACHINE_NO_PROBS = F('heart_no_probs.svmmodel')
HEART_DATA = F('heart.svmdata') #13 inputs
HEART_MACHINE = F('heart.svmmodel') #supports probabilities
HEART_EXPECTED = F('heart.out') #expected probabilities
IRIS_DATA = F('iris.svmdata')
IRIS_MACHINE = F('iris.svmmodel')
IRIS_EXPECTED = F('iris.out') #expected probabilities
def load_expected(filename):
"""Loads libsvm's svm-predict output file with probabilities"""
all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]])
data = numpy.loadtxt(filename, dtype='float64', skiprows=1)
return all_labels, data[:,0].astype('int64'), data[:,1:]
#extracted by running svm-predict.c on the heart_scale example data
expected_heart_predictions = (1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1,
-1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1,
1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1,
-1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1,
-1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1,
1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1,
1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
-1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1,
-1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1,
1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1,
-1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1,
1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1,
-1, -1, -1, -1, -1, -1, 1)
expected_iris_predictions = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3)
def test_can_load():
machine = Machine(HEART_MACHINE)
nose.tools.eq_(machine.shape, (13,1))
nose.tools.eq_(machine.n_support_vectors, [64,68])
nose.tools.eq_(machine.kernel_type, 'RBF')
nose.tools.eq_(machine.machine_type, 'C_SVC')
nose.tools.eq_(len(machine.labels), 2)
assert -1 in machine.labels
assert +1 in machine.labels
assert abs(machine.gamma - 0.0769231) < 1e-6
assert type(machine.__repr__()) is str
def test_can_save():
machine = Machine(HEART_MACHINE)
tmp = tempname('.model')
machine.save(tmp)
del machine
  # make sure that the saved machine is the same as before
machine = Machine(tmp)
nose.tools.eq_(machine.shape, (13,1))
nose.tools.eq_(machine.n_support_vectors, [64,68])
nose.tools.eq_(machine.kernel_type, 'RBF')
nose.tools.eq_(machine.machine_type, 'C_SVC')
nose.tools.eq_(len(machine.labels), 2)
assert -1 in machine.labels
assert +1 in machine.labels
assert abs(machine.gamma - 0.0769231) < 1e-6
os.unlink(tmp)
def run_for_extension(ext):
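  # Round-trips the heart machine through an HDF5 container named with the given
  # extension and checks that every attribute survives the save/load cycle.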
machine = Machine(HEART_MACHINE)
tmp = tempname(ext)
machine.save(bob.io.base.HDF5File(tmp, 'w'))
del machine
  # make sure that the saved machine is the same as before
machine = Machine(bob.io.base.HDF5File(tmp))
nose.tools.eq_(machine.shape, (13,1))
nose.tools.eq_(machine.n_support_vectors, [64,68])
nose.tools.eq_(machine.kernel_type, 'RBF')
nose.tools.eq_(machine.machine_type, 'C_SVC')
nose.tools.eq_(len(machine.labels), 2)
assert -1 in machine.labels
assert +1 in machine.labels
assert abs(machine.gamma - 0.0769231) < 1e-6
assert numpy.all(abs(machine.input_subtract - 0) < 1e-10)
assert numpy.all(abs(machine.input_divide - 1) < 1e-10)
os.unlink(tmp)
def test_can_save_arbitrary():
run_for_extension('.arbitrary')
def test_can_save_h5():
run_for_extension('.h5')
def test_can_save_hdf5():
run_for_extension('.hdf5')
def test_data_loading():
#tests if I can load data in libsvm format using SVMFile
data = File(HEART_DATA)
nose.tools.eq_(data.shape, (13,))
nose.tools.eq_(data.good(), True)
nose.tools.eq_(data.fail(), False)
nose.tools.eq_(data.eof(), False)
#tries loading the data, one by one
all_data = []
all_labels = []
while data.good():
entry = data.read()
if entry is not None:
all_labels.append(entry[0])
all_data.append(entry[1])
nose.tools.eq_(len(all_data), len(all_labels))
nose.tools.eq_(len(all_data), 270)
#tries loading the data with numpy arrays allocated internally
counter = 0
data.reset()
entry = data.read()
while entry:
nose.tools.eq_( entry[0], all_labels[counter] )
assert numpy.array_equal(entry[1], all_data[counter])
counter += 1
entry = data.read()
#tries loading the file all in a single shot
data.reset()
labels, data = data.read_all()
assert numpy.array_equal(labels, all_labels)
for k, l in zip(data, all_data):
assert numpy.array_equal(k, l)
#makes sure the first 3 examples are correctly read
ex = []
  ex.append(numpy.array([0.708333, 1, 1, -0.320755, -0.105023, -1, 1,
    -0.419847, -1, -0.225806, 0., 1, -1], 'float64'))
ex.append(numpy.array([0.583333, -1, 0.333333, -0.603774, 1, -1, 1,
0.358779, -1, -0.483871, 0., -1, 1], 'float64'))
ex.append(numpy.array([0.166667, 1, -0.333333, -0.433962, -0.383562, -1,
-1, 0.0687023, -1, -0.903226, -1, -1, 1], 'float64'))
ls = [+1, -1, +1]
for k, (l, e) in enumerate(zip(ls, ex)):
nose.tools.eq_( l, labels[k] )
assert numpy.array_equal(e, data[k])
@nose.tools.raises(RuntimeError)
def test_raises():
#tests that the normal machine raises because probabilities are not
#supported on that model
machine = Machine(TEST_MACHINE_NO_PROBS)
labels, data = File(HEART_DATA).read_all()
machine.predict_class_and_probabilities(data)
def test_correctness_heart():
#tests the correctness of the libSVM bindings
machine = Machine(HEART_MACHINE)
labels, data = File(HEART_DATA).read_all()
pred_label = machine.predict_class(data)
assert numpy.array_equal(pred_label, expected_heart_predictions)
#finally, we test if the values also work fine.
pred_lab_values = [machine.predict_class_and_scores(k) for k in data]
#tries the variant with multiple inputs
pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)
assert numpy.array_equal(expected_heart_predictions, pred_labels2)
assert numpy.array_equal(tuple([k[1] for k in pred_lab_values]), pred_scores2)
#tries to get the probabilities - note: for some reason, when getting
  #probabilities, the labels change, but notice the note below:
# Note from the libSVM FAQ:
# Q: Why using the -b option does not give me better accuracy?
# There is absolutely no reason the probability outputs guarantee you
# better accuracy. The main purpose of this option is to provide you the
# probability estimates, but not to boost prediction accuracy. From our
# experience, after proper parameter selections, in general with and
# without -b have similar accuracy. Occasionally there are some
# differences. It is not recommended to compare the two under just a fixed
# parameter set as more differences will be observed.
all_labels, real_labels, real_probs = load_expected(HEART_EXPECTED)
pred_labels, pred_probs = machine.predict_class_and_probabilities(data)
assert numpy.array_equal(pred_labels, real_labels)
assert numpy.all(abs(pred_probs - real_probs) < 1e-2), abs(pred_probs - real_probs)
def test_correctness_iris():
#same test as above, but with a 3-class problem.
machine = Machine(IRIS_MACHINE)
labels, data = File(IRIS_DATA).read_all()
pred_label = machine.predict_class(data)
assert numpy.array_equal(pred_label, expected_iris_predictions)
#finally, we test if the values also work fine.
pred_lab_values = [machine.predict_class_and_scores(k) for k in data]
#tries the variant with multiple inputs
pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)
assert numpy.array_equal(expected_iris_predictions, pred_labels2)
assert numpy.all(abs(numpy.vstack([k[1] for k in
pred_lab_values]) - numpy.vstack(pred_scores2)) < 1e-20 )
#tries to get the probabilities - note: for some reason, when getting
  #probabilities, the labels change, but notice the note below:
all_labels, real_labels, real_probs = load_expected(IRIS_EXPECTED)
pred_labels, pred_probs = machine.predict_class_and_probabilities(data)
assert numpy.array_equal(pred_labels, real_labels)
assert numpy.all(abs(numpy.vstack(pred_probs) - numpy.vstack(real_probs)) < 1e-6)
@nose.tools.raises(RuntimeError)
def test_correctness_inputsize_exceeds():
  #same test as above, but checks that excess input raises
machine = Machine(IRIS_MACHINE)
labels, data = File(IRIS_DATA).read_all()
# add extra columns to the input data
data = numpy.hstack([data, numpy.ones((data.shape[0], 2), dtype=float)])
pred_label = machine.predict_class(data)
|
normal
|
{
"blob_id": "c24be05700e5ee043d09d6f2e78cb3de1e7088f1",
"index": 6242,
"step-1": "<mask token>\n\n\ndef F(f):\n \"\"\"Returns the test file on the \"data\" subdirectory\"\"\"\n return pkg_resources.resource_filename(__name__, os.path.join('data', f))\n\n\n<mask token>\n\n\ndef load_expected(filename):\n \"\"\"Loads libsvm's svm-predict output file with probabilities\"\"\"\n all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]]\n )\n data = numpy.loadtxt(filename, dtype='float64', skiprows=1)\n return all_labels, data[:, 0].astype('int64'), data[:, 1:]\n\n\n<mask token>\n\n\ndef test_can_save():\n machine = Machine(HEART_MACHINE)\n tmp = tempname('.model')\n machine.save(tmp)\n del machine\n machine = Machine(tmp)\n nose.tools.eq_(machine.shape, (13, 1))\n nose.tools.eq_(machine.n_support_vectors, [64, 68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-06\n os.unlink(tmp)\n\n\n<mask token>\n\n\ndef test_can_save_arbitrary():\n run_for_extension('.arbitrary')\n\n\ndef test_can_save_h5():\n run_for_extension('.h5')\n\n\ndef test_can_save_hdf5():\n run_for_extension('.hdf5')\n\n\n<mask token>\n\n\ndef test_correctness_iris():\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n pred_label = machine.predict_class(data)\n assert numpy.array_equal(pred_label, expected_iris_predictions)\n pred_lab_values = [machine.predict_class_and_scores(k) for k in data]\n pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)\n assert numpy.array_equal(expected_iris_predictions, pred_labels2)\n assert numpy.all(abs(numpy.vstack([k[1] for k in pred_lab_values]) -\n numpy.vstack(pred_scores2)) < 1e-20)\n all_labels, real_labels, real_probs = load_expected(IRIS_EXPECTED)\n pred_labels, pred_probs = machine.predict_class_and_probabilities(data)\n assert numpy.array_equal(pred_labels, real_labels)\n assert numpy.all(abs(numpy.vstack(pred_probs) - numpy.vstack(real_probs\n )) < 1e-06)\n\n\n@nose.tools.raises(RuntimeError)\ndef test_correctness_inputsize_exceeds():\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n data = numpy.hstack([data, numpy.ones((data.shape[0], 2), dtype=float)])\n pred_label = machine.predict_class(data)\n",
"step-2": "<mask token>\n\n\ndef F(f):\n \"\"\"Returns the test file on the \"data\" subdirectory\"\"\"\n return pkg_resources.resource_filename(__name__, os.path.join('data', f))\n\n\n<mask token>\n\n\ndef load_expected(filename):\n \"\"\"Loads libsvm's svm-predict output file with probabilities\"\"\"\n all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]]\n )\n data = numpy.loadtxt(filename, dtype='float64', skiprows=1)\n return all_labels, data[:, 0].astype('int64'), data[:, 1:]\n\n\n<mask token>\n\n\ndef test_can_save():\n machine = Machine(HEART_MACHINE)\n tmp = tempname('.model')\n machine.save(tmp)\n del machine\n machine = Machine(tmp)\n nose.tools.eq_(machine.shape, (13, 1))\n nose.tools.eq_(machine.n_support_vectors, [64, 68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-06\n os.unlink(tmp)\n\n\n<mask token>\n\n\ndef test_can_save_arbitrary():\n run_for_extension('.arbitrary')\n\n\ndef test_can_save_h5():\n run_for_extension('.h5')\n\n\ndef test_can_save_hdf5():\n run_for_extension('.hdf5')\n\n\ndef test_data_loading():\n data = File(HEART_DATA)\n nose.tools.eq_(data.shape, (13,))\n nose.tools.eq_(data.good(), True)\n nose.tools.eq_(data.fail(), False)\n nose.tools.eq_(data.eof(), False)\n all_data = []\n all_labels = []\n while data.good():\n entry = data.read()\n if entry is not None:\n all_labels.append(entry[0])\n all_data.append(entry[1])\n nose.tools.eq_(len(all_data), len(all_labels))\n nose.tools.eq_(len(all_data), 270)\n counter = 0\n data.reset()\n entry = data.read()\n while entry:\n nose.tools.eq_(entry[0], all_labels[counter])\n assert numpy.array_equal(entry[1], all_data[counter])\n counter += 1\n entry = data.read()\n data.reset()\n labels, data = data.read_all()\n assert numpy.array_equal(labels, all_labels)\n for k, l in zip(data, all_data):\n assert numpy.array_equal(k, l)\n ex = []\n ex.append(numpy.array([0.708333, 1, 1, -0.320755, -0.105023, -1, 1, -\n 0.419847, -1, -0.225806, 0.0, 1, -1], 'float64'))\n ex.append(numpy.array([0.583333, -1, 0.333333, -0.603774, 1, -1, 1, \n 0.358779, -1, -0.483871, 0.0, -1, 1], 'float64'))\n ex.append(numpy.array([0.166667, 1, -0.333333, -0.433962, -0.383562, -1,\n -1, 0.0687023, -1, -0.903226, -1, -1, 1], 'float64'))\n ls = [+1, -1, +1]\n for k, (l, e) in enumerate(zip(ls, ex)):\n nose.tools.eq_(l, labels[k])\n assert numpy.array_equal(e, data[k])\n\n\n<mask token>\n\n\ndef test_correctness_iris():\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n pred_label = machine.predict_class(data)\n assert numpy.array_equal(pred_label, expected_iris_predictions)\n pred_lab_values = [machine.predict_class_and_scores(k) for k in data]\n pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)\n assert numpy.array_equal(expected_iris_predictions, pred_labels2)\n assert numpy.all(abs(numpy.vstack([k[1] for k in pred_lab_values]) -\n numpy.vstack(pred_scores2)) < 1e-20)\n all_labels, real_labels, real_probs = load_expected(IRIS_EXPECTED)\n pred_labels, pred_probs = machine.predict_class_and_probabilities(data)\n assert numpy.array_equal(pred_labels, real_labels)\n assert numpy.all(abs(numpy.vstack(pred_probs) - numpy.vstack(real_probs\n )) < 1e-06)\n\n\n@nose.tools.raises(RuntimeError)\ndef test_correctness_inputsize_exceeds():\n machine = Machine(IRIS_MACHINE)\n labels, data = 
File(IRIS_DATA).read_all()\n data = numpy.hstack([data, numpy.ones((data.shape[0], 2), dtype=float)])\n pred_label = machine.predict_class(data)\n",
"step-3": "<mask token>\n\n\ndef F(f):\n \"\"\"Returns the test file on the \"data\" subdirectory\"\"\"\n return pkg_resources.resource_filename(__name__, os.path.join('data', f))\n\n\ndef tempname(suffix, prefix='bobtest_machine_'):\n fd, name = tempfile.mkstemp(suffix, prefix)\n os.close(fd)\n os.unlink(name)\n return name\n\n\n<mask token>\n\n\ndef load_expected(filename):\n \"\"\"Loads libsvm's svm-predict output file with probabilities\"\"\"\n all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]]\n )\n data = numpy.loadtxt(filename, dtype='float64', skiprows=1)\n return all_labels, data[:, 0].astype('int64'), data[:, 1:]\n\n\n<mask token>\n\n\ndef test_can_save():\n machine = Machine(HEART_MACHINE)\n tmp = tempname('.model')\n machine.save(tmp)\n del machine\n machine = Machine(tmp)\n nose.tools.eq_(machine.shape, (13, 1))\n nose.tools.eq_(machine.n_support_vectors, [64, 68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-06\n os.unlink(tmp)\n\n\ndef run_for_extension(ext):\n machine = Machine(HEART_MACHINE)\n tmp = tempname(ext)\n machine.save(bob.io.base.HDF5File(tmp, 'w'))\n del machine\n machine = Machine(bob.io.base.HDF5File(tmp))\n nose.tools.eq_(machine.shape, (13, 1))\n nose.tools.eq_(machine.n_support_vectors, [64, 68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-06\n assert numpy.all(abs(machine.input_subtract - 0) < 1e-10)\n assert numpy.all(abs(machine.input_divide - 1) < 1e-10)\n os.unlink(tmp)\n\n\ndef test_can_save_arbitrary():\n run_for_extension('.arbitrary')\n\n\ndef test_can_save_h5():\n run_for_extension('.h5')\n\n\ndef test_can_save_hdf5():\n run_for_extension('.hdf5')\n\n\ndef test_data_loading():\n data = File(HEART_DATA)\n nose.tools.eq_(data.shape, (13,))\n nose.tools.eq_(data.good(), True)\n nose.tools.eq_(data.fail(), False)\n nose.tools.eq_(data.eof(), False)\n all_data = []\n all_labels = []\n while data.good():\n entry = data.read()\n if entry is not None:\n all_labels.append(entry[0])\n all_data.append(entry[1])\n nose.tools.eq_(len(all_data), len(all_labels))\n nose.tools.eq_(len(all_data), 270)\n counter = 0\n data.reset()\n entry = data.read()\n while entry:\n nose.tools.eq_(entry[0], all_labels[counter])\n assert numpy.array_equal(entry[1], all_data[counter])\n counter += 1\n entry = data.read()\n data.reset()\n labels, data = data.read_all()\n assert numpy.array_equal(labels, all_labels)\n for k, l in zip(data, all_data):\n assert numpy.array_equal(k, l)\n ex = []\n ex.append(numpy.array([0.708333, 1, 1, -0.320755, -0.105023, -1, 1, -\n 0.419847, -1, -0.225806, 0.0, 1, -1], 'float64'))\n ex.append(numpy.array([0.583333, -1, 0.333333, -0.603774, 1, -1, 1, \n 0.358779, -1, -0.483871, 0.0, -1, 1], 'float64'))\n ex.append(numpy.array([0.166667, 1, -0.333333, -0.433962, -0.383562, -1,\n -1, 0.0687023, -1, -0.903226, -1, -1, 1], 'float64'))\n ls = [+1, -1, +1]\n for k, (l, e) in enumerate(zip(ls, ex)):\n nose.tools.eq_(l, labels[k])\n assert numpy.array_equal(e, data[k])\n\n\n@nose.tools.raises(RuntimeError)\ndef test_raises():\n machine = Machine(TEST_MACHINE_NO_PROBS)\n labels, data = File(HEART_DATA).read_all()\n 
machine.predict_class_and_probabilities(data)\n\n\n<mask token>\n\n\ndef test_correctness_iris():\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n pred_label = machine.predict_class(data)\n assert numpy.array_equal(pred_label, expected_iris_predictions)\n pred_lab_values = [machine.predict_class_and_scores(k) for k in data]\n pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)\n assert numpy.array_equal(expected_iris_predictions, pred_labels2)\n assert numpy.all(abs(numpy.vstack([k[1] for k in pred_lab_values]) -\n numpy.vstack(pred_scores2)) < 1e-20)\n all_labels, real_labels, real_probs = load_expected(IRIS_EXPECTED)\n pred_labels, pred_probs = machine.predict_class_and_probabilities(data)\n assert numpy.array_equal(pred_labels, real_labels)\n assert numpy.all(abs(numpy.vstack(pred_probs) - numpy.vstack(real_probs\n )) < 1e-06)\n\n\n@nose.tools.raises(RuntimeError)\ndef test_correctness_inputsize_exceeds():\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n data = numpy.hstack([data, numpy.ones((data.shape[0], 2), dtype=float)])\n pred_label = machine.predict_class(data)\n",
"step-4": "<mask token>\n\n\ndef F(f):\n \"\"\"Returns the test file on the \"data\" subdirectory\"\"\"\n return pkg_resources.resource_filename(__name__, os.path.join('data', f))\n\n\ndef tempname(suffix, prefix='bobtest_machine_'):\n fd, name = tempfile.mkstemp(suffix, prefix)\n os.close(fd)\n os.unlink(name)\n return name\n\n\n<mask token>\n\n\ndef load_expected(filename):\n \"\"\"Loads libsvm's svm-predict output file with probabilities\"\"\"\n all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]]\n )\n data = numpy.loadtxt(filename, dtype='float64', skiprows=1)\n return all_labels, data[:, 0].astype('int64'), data[:, 1:]\n\n\n<mask token>\n\n\ndef test_can_save():\n machine = Machine(HEART_MACHINE)\n tmp = tempname('.model')\n machine.save(tmp)\n del machine\n machine = Machine(tmp)\n nose.tools.eq_(machine.shape, (13, 1))\n nose.tools.eq_(machine.n_support_vectors, [64, 68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-06\n os.unlink(tmp)\n\n\ndef run_for_extension(ext):\n machine = Machine(HEART_MACHINE)\n tmp = tempname(ext)\n machine.save(bob.io.base.HDF5File(tmp, 'w'))\n del machine\n machine = Machine(bob.io.base.HDF5File(tmp))\n nose.tools.eq_(machine.shape, (13, 1))\n nose.tools.eq_(machine.n_support_vectors, [64, 68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-06\n assert numpy.all(abs(machine.input_subtract - 0) < 1e-10)\n assert numpy.all(abs(machine.input_divide - 1) < 1e-10)\n os.unlink(tmp)\n\n\ndef test_can_save_arbitrary():\n run_for_extension('.arbitrary')\n\n\ndef test_can_save_h5():\n run_for_extension('.h5')\n\n\ndef test_can_save_hdf5():\n run_for_extension('.hdf5')\n\n\ndef test_data_loading():\n data = File(HEART_DATA)\n nose.tools.eq_(data.shape, (13,))\n nose.tools.eq_(data.good(), True)\n nose.tools.eq_(data.fail(), False)\n nose.tools.eq_(data.eof(), False)\n all_data = []\n all_labels = []\n while data.good():\n entry = data.read()\n if entry is not None:\n all_labels.append(entry[0])\n all_data.append(entry[1])\n nose.tools.eq_(len(all_data), len(all_labels))\n nose.tools.eq_(len(all_data), 270)\n counter = 0\n data.reset()\n entry = data.read()\n while entry:\n nose.tools.eq_(entry[0], all_labels[counter])\n assert numpy.array_equal(entry[1], all_data[counter])\n counter += 1\n entry = data.read()\n data.reset()\n labels, data = data.read_all()\n assert numpy.array_equal(labels, all_labels)\n for k, l in zip(data, all_data):\n assert numpy.array_equal(k, l)\n ex = []\n ex.append(numpy.array([0.708333, 1, 1, -0.320755, -0.105023, -1, 1, -\n 0.419847, -1, -0.225806, 0.0, 1, -1], 'float64'))\n ex.append(numpy.array([0.583333, -1, 0.333333, -0.603774, 1, -1, 1, \n 0.358779, -1, -0.483871, 0.0, -1, 1], 'float64'))\n ex.append(numpy.array([0.166667, 1, -0.333333, -0.433962, -0.383562, -1,\n -1, 0.0687023, -1, -0.903226, -1, -1, 1], 'float64'))\n ls = [+1, -1, +1]\n for k, (l, e) in enumerate(zip(ls, ex)):\n nose.tools.eq_(l, labels[k])\n assert numpy.array_equal(e, data[k])\n\n\n@nose.tools.raises(RuntimeError)\ndef test_raises():\n machine = Machine(TEST_MACHINE_NO_PROBS)\n labels, data = File(HEART_DATA).read_all()\n 
machine.predict_class_and_probabilities(data)\n\n\ndef test_correctness_heart():\n machine = Machine(HEART_MACHINE)\n labels, data = File(HEART_DATA).read_all()\n pred_label = machine.predict_class(data)\n assert numpy.array_equal(pred_label, expected_heart_predictions)\n pred_lab_values = [machine.predict_class_and_scores(k) for k in data]\n pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)\n assert numpy.array_equal(expected_heart_predictions, pred_labels2)\n assert numpy.array_equal(tuple([k[1] for k in pred_lab_values]),\n pred_scores2)\n all_labels, real_labels, real_probs = load_expected(HEART_EXPECTED)\n pred_labels, pred_probs = machine.predict_class_and_probabilities(data)\n assert numpy.array_equal(pred_labels, real_labels)\n assert numpy.all(abs(pred_probs - real_probs) < 0.01), abs(pred_probs -\n real_probs)\n\n\ndef test_correctness_iris():\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n pred_label = machine.predict_class(data)\n assert numpy.array_equal(pred_label, expected_iris_predictions)\n pred_lab_values = [machine.predict_class_and_scores(k) for k in data]\n pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)\n assert numpy.array_equal(expected_iris_predictions, pred_labels2)\n assert numpy.all(abs(numpy.vstack([k[1] for k in pred_lab_values]) -\n numpy.vstack(pred_scores2)) < 1e-20)\n all_labels, real_labels, real_probs = load_expected(IRIS_EXPECTED)\n pred_labels, pred_probs = machine.predict_class_and_probabilities(data)\n assert numpy.array_equal(pred_labels, real_labels)\n assert numpy.all(abs(numpy.vstack(pred_probs) - numpy.vstack(real_probs\n )) < 1e-06)\n\n\n@nose.tools.raises(RuntimeError)\ndef test_correctness_inputsize_exceeds():\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n data = numpy.hstack([data, numpy.ones((data.shape[0], 2), dtype=float)])\n pred_label = machine.predict_class(data)\n",
"step-5": "#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Andre Anjos <andre.anjos@idiap.ch>\n# Sat Dec 17 14:41:56 2011 +0100\n#\n# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland\n\n\"\"\"Run tests on the libsvm machine infrastructure.\n\"\"\"\n\nimport os\nimport numpy\nimport tempfile\nimport pkg_resources\nimport nose.tools\nimport bob.io.base\n\nfrom . import File, Machine\n\ndef F(f):\n \"\"\"Returns the test file on the \"data\" subdirectory\"\"\"\n return pkg_resources.resource_filename(__name__, os.path.join('data', f))\n\ndef tempname(suffix, prefix='bobtest_machine_'):\n (fd, name) = tempfile.mkstemp(suffix, prefix)\n os.close(fd)\n os.unlink(name)\n return name\n\nTEST_MACHINE_NO_PROBS = F('heart_no_probs.svmmodel')\n\nHEART_DATA = F('heart.svmdata') #13 inputs\nHEART_MACHINE = F('heart.svmmodel') #supports probabilities\nHEART_EXPECTED = F('heart.out') #expected probabilities\n\nIRIS_DATA = F('iris.svmdata')\nIRIS_MACHINE = F('iris.svmmodel')\nIRIS_EXPECTED = F('iris.out') #expected probabilities\n\ndef load_expected(filename):\n \"\"\"Loads libsvm's svm-predict output file with probabilities\"\"\"\n\n all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]])\n data = numpy.loadtxt(filename, dtype='float64', skiprows=1)\n return all_labels, data[:,0].astype('int64'), data[:,1:]\n\n#extracted by running svm-predict.c on the heart_scale example data\nexpected_heart_predictions = (1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1,\n -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1,\n 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1,\n -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,\n 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1,\n -1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1,\n 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1,\n 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,\n -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1,\n -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1,\n 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1,\n -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1,\n 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1,\n -1, -1, -1, -1, -1, -1, 1)\n\nexpected_iris_predictions = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 3)\n\ndef test_can_load():\n\n machine = Machine(HEART_MACHINE)\n nose.tools.eq_(machine.shape, (13,1))\n nose.tools.eq_(machine.n_support_vectors, [64,68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-6\n assert type(machine.__repr__()) is str\n\ndef test_can_save():\n\n machine = Machine(HEART_MACHINE)\n tmp = tempname('.model')\n machine.save(tmp)\n del machine\n\n # make sure that the save machine 
is the same as before\n machine = Machine(tmp)\n nose.tools.eq_(machine.shape, (13,1))\n nose.tools.eq_(machine.n_support_vectors, [64,68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-6\n\n os.unlink(tmp)\n\ndef run_for_extension(ext):\n\n machine = Machine(HEART_MACHINE)\n tmp = tempname(ext)\n machine.save(bob.io.base.HDF5File(tmp, 'w'))\n del machine\n\n # make sure that the save machine is the same as before\n machine = Machine(bob.io.base.HDF5File(tmp))\n nose.tools.eq_(machine.shape, (13,1))\n nose.tools.eq_(machine.n_support_vectors, [64,68])\n nose.tools.eq_(machine.kernel_type, 'RBF')\n nose.tools.eq_(machine.machine_type, 'C_SVC')\n nose.tools.eq_(len(machine.labels), 2)\n assert -1 in machine.labels\n assert +1 in machine.labels\n assert abs(machine.gamma - 0.0769231) < 1e-6\n assert numpy.all(abs(machine.input_subtract - 0) < 1e-10)\n assert numpy.all(abs(machine.input_divide - 1) < 1e-10)\n\n os.unlink(tmp)\n\ndef test_can_save_arbitrary():\n run_for_extension('.arbitrary')\n\ndef test_can_save_h5():\n run_for_extension('.h5')\n\ndef test_can_save_hdf5():\n run_for_extension('.hdf5')\n\ndef test_data_loading():\n\n #tests if I can load data in libsvm format using SVMFile\n data = File(HEART_DATA)\n nose.tools.eq_(data.shape, (13,))\n nose.tools.eq_(data.good(), True)\n nose.tools.eq_(data.fail(), False)\n nose.tools.eq_(data.eof(), False)\n\n\n #tries loading the data, one by one\n all_data = []\n all_labels = []\n while data.good():\n entry = data.read()\n if entry is not None:\n all_labels.append(entry[0])\n all_data.append(entry[1])\n\n nose.tools.eq_(len(all_data), len(all_labels))\n nose.tools.eq_(len(all_data), 270)\n\n #tries loading the data with numpy arrays allocated internally\n counter = 0\n data.reset()\n entry = data.read()\n while entry:\n nose.tools.eq_( entry[0], all_labels[counter] )\n assert numpy.array_equal(entry[1], all_data[counter])\n counter += 1\n entry = data.read()\n\n\n #tries loading the file all in a single shot\n data.reset()\n labels, data = data.read_all()\n\n assert numpy.array_equal(labels, all_labels)\n for k, l in zip(data, all_data):\n assert numpy.array_equal(k, l)\n\n\n\n\n #makes sure the first 3 examples are correctly read\n ex = []\n ex.append(numpy.array([0.708333 , 1, 1, -0.320755 , -0.105023 , -1, 1,\n -0.419847 ,-1, -0.225806 ,0. 
,1, -1], 'float64'))\n ex.append(numpy.array([0.583333, -1, 0.333333, -0.603774, 1, -1, 1,\n 0.358779, -1, -0.483871, 0., -1, 1], 'float64'))\n ex.append(numpy.array([0.166667, 1, -0.333333, -0.433962, -0.383562, -1,\n -1, 0.0687023, -1, -0.903226, -1, -1, 1], 'float64'))\n ls = [+1, -1, +1]\n\n for k, (l, e) in enumerate(zip(ls, ex)):\n nose.tools.eq_( l, labels[k] )\n assert numpy.array_equal(e, data[k])\n\n\n@nose.tools.raises(RuntimeError)\ndef test_raises():\n\n #tests that the normal machine raises because probabilities are not\n #supported on that model\n machine = Machine(TEST_MACHINE_NO_PROBS)\n labels, data = File(HEART_DATA).read_all()\n machine.predict_class_and_probabilities(data)\n\ndef test_correctness_heart():\n\n #tests the correctness of the libSVM bindings\n machine = Machine(HEART_MACHINE)\n labels, data = File(HEART_DATA).read_all()\n pred_label = machine.predict_class(data)\n\n assert numpy.array_equal(pred_label, expected_heart_predictions)\n\n #finally, we test if the values also work fine.\n pred_lab_values = [machine.predict_class_and_scores(k) for k in data]\n\n #tries the variant with multiple inputs\n pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)\n assert numpy.array_equal(expected_heart_predictions, pred_labels2)\n assert numpy.array_equal(tuple([k[1] for k in pred_lab_values]), pred_scores2)\n\n #tries to get the probabilities - note: for some reason, when getting\n #probabilities, the labels change, but notice the note bellow:\n\n # Note from the libSVM FAQ:\n # Q: Why using the -b option does not give me better accuracy?\n # There is absolutely no reason the probability outputs guarantee you\n # better accuracy. The main purpose of this option is to provide you the\n # probability estimates, but not to boost prediction accuracy. From our\n # experience, after proper parameter selections, in general with and\n # without -b have similar accuracy. Occasionally there are some\n # differences. 
It is not recommended to compare the two under just a fixed\n # parameter set as more differences will be observed.\n all_labels, real_labels, real_probs = load_expected(HEART_EXPECTED)\n\n pred_labels, pred_probs = machine.predict_class_and_probabilities(data)\n assert numpy.array_equal(pred_labels, real_labels)\n assert numpy.all(abs(pred_probs - real_probs) < 1e-2), abs(pred_probs - real_probs)\n\ndef test_correctness_iris():\n\n #same test as above, but with a 3-class problem.\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n pred_label = machine.predict_class(data)\n\n assert numpy.array_equal(pred_label, expected_iris_predictions)\n\n #finally, we test if the values also work fine.\n pred_lab_values = [machine.predict_class_and_scores(k) for k in data]\n\n #tries the variant with multiple inputs\n pred_labels2, pred_scores2 = machine.predict_class_and_scores(data)\n assert numpy.array_equal(expected_iris_predictions, pred_labels2)\n assert numpy.all(abs(numpy.vstack([k[1] for k in\n pred_lab_values]) - numpy.vstack(pred_scores2)) < 1e-20 )\n\n #tries to get the probabilities - note: for some reason, when getting\n #probabilities, the labels change, but notice the note bellow:\n\n all_labels, real_labels, real_probs = load_expected(IRIS_EXPECTED)\n\n pred_labels, pred_probs = machine.predict_class_and_probabilities(data)\n assert numpy.array_equal(pred_labels, real_labels)\n assert numpy.all(abs(numpy.vstack(pred_probs) - numpy.vstack(real_probs)) < 1e-6)\n\n\n@nose.tools.raises(RuntimeError)\ndef test_correctness_inputsize_exceeds():\n\n #same test as above, but test for excess input\n machine = Machine(IRIS_MACHINE)\n labels, data = File(IRIS_DATA).read_all()\n\n # add extra columns to the input data\n data = numpy.hstack([data, numpy.ones((data.shape[0], 2), dtype=float)])\n\n pred_label = machine.predict_class(data)\n",
"step-ids": [
8,
9,
12,
13,
17
]
}
|
[
8,
9,
12,
13,
17
] |
import pymysql
from app_module.models import User, Vehicle, Address, Customer, Location, Coupon, VehicleClass, Corporation, Corporate
from datetime import datetime
HOSTNAME = 'localhost'
USERNAME = 'root'
PASSWORD = '123456'
DATABASE = 'proj_p2'
def get_connection():
my_sql_connection = pymysql.connect(host=HOSTNAME, user=USERNAME, passwd=PASSWORD, db=DATABASE)
return my_sql_connection
def run_query(query, args=None):
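    # One-shot helper: executes a single statement on a fresh connection,
    # commits, and returns the fetched rows (or None when the result set is empty).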
    conn = get_connection()
    cur = conn.cursor()
    cur.execute(query, args)
    rs = cur.fetchall()
    conn.commit()
    cur.close()
    conn.close()
    if len(rs) != 0:
        return rs
def insert_address(address_obj):
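    # Insert the row, then select it back to recover the auto-generated id;
    # the other insert_* helpers below follow the same insert-then-lookup pattern.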
run_query('''insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'''
, (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))
rs = run_query('''select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'''
, (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))
return rs[0][0]
def insert_customer(customer_obj):
run_query('''insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id,
username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) '''
, (customer_obj.cust_type, customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email,
customer_obj.cust_phonenum, customer_obj.address_id, customer_obj.username, customer_obj.password))
rs = run_query(
'''select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'''
, (customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email, customer_obj.cust_phonenum))
return rs[0][0]
def insert_vehicle(vehicle_obj):
run_query('''insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values
(%s, %s, %s, %s, %s, %s, %s) '''
, (
vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num, vehicle_obj.license_num,
vehicle_obj.class_num, vehicle_obj.location_id))
rs = run_query('''select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin
= %s and veh_license = %s and vc_num = %s and ol_id = %s '''
, (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num,
vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id))
return rs[0][0]
def insert_vehicle_class(class_obj):
run_query('''insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'''
, (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))
rs = run_query('''select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile =
%s '''
, (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))
return rs[0][0]
def insert_office_location(location_obj):
run_query('''insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s,
%s, %s, %s, %s) '''
, (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,
int(location_obj.zipcode)))
rs = run_query('''select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s
and ol_street=%s and ol_zipcode=%s '''
, (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,
int(location_obj.zipcode)))
return rs[0][0]
def insert_corporation(corp_obj):
run_query('''insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'''
, (corp_obj.corp_name, corp_obj.corp_regnum))
rs = run_query('''select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'''
, (corp_obj.corp_name, corp_obj.corp_regnum))
return rs[0][0]
def insert_corporate(corporate_obj):
run_query('''insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'''
, (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))
rs = run_query(
'''select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'''
, (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))
return rs[0][0]
def insert_individual(individual_obj):
run_query(
'''insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)'''
, (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,
individual_obj.cust_insurpolnum, individual_obj.cust_type))
rs = run_query(
'''select * from zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s'''
, (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,
individual_obj.cust_insurpolnum, individual_obj.cust_type))
return rs[0][0]
def insert_invoice(invoice_obj):
run_query('''insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) '''
, (invoice_obj.inv_date, invoice_obj.inv_amount))
rs = run_query('''select * from zlrz_invoice where inv_date = %s and inv_amount = %s'''
, (invoice_obj.inv_date, invoice_obj.inv_amount))
return rs[0][0]
def insert_payment(payment_obj):
run_query('''insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount)
values (%s, %s , %s , %s , %s) '''
, (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id
, payment_obj.pay_amount))
rs = run_query('''select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s
and pay_amount=%s'''
, (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id
, payment_obj.pay_amount))
return rs[0][0]
def insert_rental(rental_obj):
run_query('''insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer
, ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id)
values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) '''
, (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer
, rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id
, rental_obj.cust_type, rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.ren_dropoffloc_id
, rental_obj.inv_id, rental_obj.cou_id))
rs = run_query('''select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s
and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s
and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s'''
, (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer
, rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id
, rental_obj.cust_type, rental_obj.veh_id, rental_obj.ren_pickuplocid,
rental_obj.ren_dropoffloc_id
, rental_obj.inv_id, rental_obj.cou_id))
return rs[0][0]
def insert_coupon(coupon_obj):
run_query('''insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '''
, (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))
if coupon_obj.validstart and coupon_obj.validend:
rs = run_query(
'''select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'''
, (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))
else:
rs = run_query(
'''select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'''
            , (coupon_obj.cou_rate,))
return rs[0][0]
def insert_cust_coupon(cust_coupon_obj):
run_query('''insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) '''
,
(cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj.cust_type, cust_coupon_obj.coupon_type))
return
def get_password(username):
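    # run_query returns None when no rows matched, so an unknown username
    # yields None here (the two lookups below behave the same way).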
rs = run_query('''select password from zlrz_customer where username = %s''', (username,))
return rs[0][0] if rs is not None else rs
def get_user_type(username):
rs = run_query('''select cust_type from zlrz_customer where username = %s''', (username,))
return rs[0][0] if rs is not None else rs
def get_user_id(username):
rs = run_query('''select cust_id from zlrz_customer where username = %s''', (username,))
return rs[0][0] if rs is not None else rs
def get_all_corporations():
rs = run_query('''select * from zlrz_corporation''')
return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2], t[0]), rs))
def get_cust_coupon(cust_id):
rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons
    on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s''', (cust_id,))
return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))
def get_coupon(cust_id):
rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons
on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s'''
, (cust_id,))
res = None
maxrate = float('-inf')
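    # Keep the highest-rate coupon that is valid today; coupons with no
    # start/end dates are treated as always valid.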
if rs is not None:
coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))
for cou in coupons:
if cou.validstart and cou.validend:
if (datetime.now() - cou.validstart).days >= 0 and (cou.validend - datetime.now()).days >= 0:
if cou.cou_rate > maxrate:
maxrate = cou.cou_rate
res = cou
if not cou.validstart and not cou.validend:
if cou.cou_rate > maxrate:
maxrate = cou.cou_rate
res = cou
return res
def get_vehicles():
"""
    Get all vehicle rows as raw tuples
:return:
"""
rs = run_query('''select * from zlrz_vehicle''')
return [] if rs is None else rs
def get_all_customers():
rs = run_query('''select * from zlrz_customer''')
return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))
def get_all_corporate():
rs = run_query('''select * from zlrz_corporate''')
return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3]), rs))
def get_all_individual():
rs = run_query('''select * from zlrz_individual''')
return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3], t[4]), rs))
def get_all_vehicles():
rs = run_query('''select * from zlrz_vehicle''')
return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))
def get_all_locations():
"""
Get all location objects
:return:
"""
rs = run_query('''select * from zlrz_office_location''')
return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))
def get_location_by_id(location_id):
"""
    Get the single location object matching the given id
:return:
"""
rs = run_query('''select * from zlrz_office_location where ol_id = %s''', (location_id,))
return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))[0] if rs is not None else None
def get_all_vehclasses():
"""
    Get all vehicle class objects
:return:
"""
rs = run_query('''select * from zlrz_vehicle_class''')
return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))
def get_vehicle_by_id(vehicle_id):
rs = run_query('''select * from zlrz_vehicle where veh_id=%s''', (int(vehicle_id),))
return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))[0] \
if rs is not None else None
def get_vehicle_class(vehicle_id):
rs = run_query('''select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class
on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s''', (int(vehicle_id),))
return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0] if rs is not None else None
def delete_veh_class(vc_num):
if vc_num == '':
return
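    # Guard: a class still referenced by vehicles must not be deleted;
    # callers treat a return value of 1 as "in use".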
    res = run_query('''select * from zlrz_vehicle where vc_num=%s''', (int(vc_num),))
if res:
return 1
else:
        rs = run_query('''delete from zlrz_vehicle_class where vc_num=%s''', (int(vc_num),))
return rs
def delete_off_loc(location_id):
if location_id == '':
return
    res = run_query('''select * from zlrz_office_location where ol_id=%s''', (int(location_id),))
if res:
return 1
else:
        rs = run_query('''delete from zlrz_office_location where ol_id=%s''', (int(location_id),))
return rs
def delete_vehicle(veh_id):
if veh_id == '':
return
    rs = run_query('''delete from zlrz_vehicle where veh_id=%s''', (int(veh_id),))
return rs
def delete_customer(cust_id):
if cust_id == '':
return
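    # Delete dependent rows first (rentals, coupon links, corporate/individual
    # records) so foreign-key constraints allow removing the customer row.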
    rs5 = run_query('''delete from zlrz_rental where cust_id=%s''', (int(cust_id),))
    rs4 = run_query('''delete from zlrz_cust_coupon where cust_id=%s''', (int(cust_id),))
    rs2 = run_query('''delete from zlrz_corporate where cust_id=%s''', (int(cust_id),))
    rs3 = run_query('''delete from zlrz_individual where cust_id=%s''', (int(cust_id),))
    rs1 = run_query('''delete from zlrz_customer where cust_id=%s''', (int(cust_id),))
return rs1
def delete_cust_coupon(cou_id):
if cou_id == '':
return
    rs1 = run_query('''delete from zlrz_cust_coupon where cou_id=%s''', (int(cou_id),))
    rs2 = run_query('''delete from zlrz_coupons where cou_id=%s''', (int(cou_id),))
return rs1
def delete_corporation(corp_id):
if corp_id == '':
return
    res = run_query('''select * from zlrz_corporation where corp_id=%s''', (int(corp_id),))
if res:
return 1
else:
        rs = run_query('''delete from zlrz_corporation where corp_id=%s''', (int(corp_id),))
return rs
def update_vehicle_class(class_obj):
rs = run_query('''update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s''', (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile), class_obj.vc_name))
return rs
|
normal
|
{
"blob_id": "62bad8eeb3b51a5012dad761a60639d36429d8e8",
"index": 7660,
"step-1": "<mask token>\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where 
cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\n<mask token>\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\n<mask token>\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef 
get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\n<mask token>\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\n<mask token>\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query(\n 'insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query(\n 'select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return 
rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\n<mask token>\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\n<mask token>\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n 
:return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('select * from zlrz_individual')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3], t[4]), rs))\n\n\n<mask token>\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\n<mask token>\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query(\n 'insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query(\n 'select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return 
rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_individual(individual_obj):\n run_query(\n 'insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\ndef insert_payment(payment_obj):\n run_query(\n \"\"\"insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount) \n values (%s, %s , %s , %s , %s) \"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n rs = run_query(\n \"\"\"select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s\n and pay_amount=%s\"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n return rs[0][0]\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , 
(coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\ndef insert_cust_coupon(cust_coupon_obj):\n run_query(\n 'insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) '\n , (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj\n .cust_type, cust_coupon_obj.coupon_type))\n return\n\n\ndef get_password(username):\n rs = run_query('select password from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_all_corporations():\n rs = run_query('select * from zlrz_corporation')\n return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2],\n t[0]), rs))\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\ndef get_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , (cust_id,))\n res = None\n maxrate = float('-inf')\n if rs is not None:\n coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n for cou in coupons:\n if cou.validstart and cou.validend:\n if (datetime.now() - cou.validstart).days >= 0 and (cou.\n validend - datetime.now()).days >= 0:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n if not cou.validstart and not cou.validend:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n return res\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('select * from zlrz_individual')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3], t[4]), rs))\n\n\ndef get_all_vehicles():\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3\n ], t[4], t[5], t[6], t[7], t[0]), rs))\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None 
else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\ndef get_vehicle_by_id(vehicle_id):\n rs = run_query('select * from zlrz_vehicle where veh_id=%s', (int(\n vehicle_id),))\n return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t\n [7], t[0]), rs))[0] if rs is not None else None\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\ndef delete_veh_class(vc_num):\n if vc_num == '':\n return\n res = run_query('select * from zlrz_vehicle where vc_num=%s', int(vc_num))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_vehicle_class where vc_num=%s',\n int(vc_num))\n return rs\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\ndef update_vehicle_class(class_obj):\n rs = run_query(\n 'update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s'\n , (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile),\n class_obj.vc_name))\n return rs\n",
"step-4": "import pymysql\nfrom app_module.models import User, Vehicle, Address, Customer, Location, Coupon, VehicleClass, Corporation, Corporate\nfrom datetime import datetime\nHOSTNAME = 'localhost'\nUSERNAME = 'root'\nPASSWORD = '123456'\nDATABASE = 'proj_p2'\n\n\ndef get_connection():\n my_sql_connection = pymysql.connect(host=HOSTNAME, user=USERNAME,\n passwd=PASSWORD, db=DATABASE)\n return my_sql_connection\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n 
location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query(\n 'insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query(\n 'select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_individual(individual_obj):\n run_query(\n 'insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\ndef insert_payment(payment_obj):\n run_query(\n \"\"\"insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount) \n values (%s, %s , %s , %s , %s) \"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n rs = run_query(\n \"\"\"select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s\n and pay_amount=%s\"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n return rs[0][0]\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n 
rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\ndef insert_cust_coupon(cust_coupon_obj):\n run_query(\n 'insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) '\n , (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj\n .cust_type, cust_coupon_obj.coupon_type))\n return\n\n\ndef get_password(username):\n rs = run_query('select password from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_all_corporations():\n rs = run_query('select * from zlrz_corporation')\n return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2],\n t[0]), rs))\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\ndef get_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , (cust_id,))\n res = None\n maxrate = float('-inf')\n if rs is not None:\n coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n for cou in coupons:\n if cou.validstart and cou.validend:\n if (datetime.now() - cou.validstart).days >= 0 and (cou.\n validend - datetime.now()).days >= 0:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n if not cou.validstart and not cou.validend:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n return res\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('select * from zlrz_individual')\n return [] if rs is None else list(map(lambda t: 
Corporate(t[0], t[1], t\n [2], t[3], t[4]), rs))\n\n\ndef get_all_vehicles():\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3\n ], t[4], t[5], t[6], t[7], t[0]), rs))\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\ndef get_vehicle_by_id(vehicle_id):\n rs = run_query('select * from zlrz_vehicle where veh_id=%s', (int(\n vehicle_id),))\n return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t\n [7], t[0]), rs))[0] if rs is not None else None\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\ndef delete_veh_class(vc_num):\n if vc_num == '':\n return\n res = run_query('select * from zlrz_vehicle where vc_num=%s', int(vc_num))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_vehicle_class where vc_num=%s',\n int(vc_num))\n return rs\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\ndef update_vehicle_class(class_obj):\n rs = run_query(\n 'update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s'\n , (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile),\n 
class_obj.vc_name))\n return rs\n",
"step-5": "import pymysql\nfrom app_module.models import User, Vehicle, Address, Customer, Location, Coupon, VehicleClass, Corporation, Corporate\nfrom datetime import datetime\n\nHOSTNAME = 'localhost'\nUSERNAME = 'root'\nPASSWORD = '123456'\nDATABASE = 'proj_p2'\n\n\ndef get_connection():\n my_sql_connection = pymysql.connect(host=HOSTNAME, user=USERNAME, passwd=PASSWORD, db=DATABASE)\n return my_sql_connection\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n\n cur.execute(query, args)\n\n rs = cur.fetchall()\n if (len(rs) != 0):\n return rs\n conn.commit()\n\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query('''insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'''\n , (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))\n rs = run_query('''select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'''\n , (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query('''insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) '''\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email,\n customer_obj.cust_phonenum, customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n '''select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'''\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query('''insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) '''\n , (\n vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num, vehicle_obj.license_num,\n vehicle_obj.class_num, vehicle_obj.location_id))\n rs = run_query('''select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s '''\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num,\n vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query('''insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'''\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))\n rs = run_query('''select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s '''\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query('''insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) '''\n , (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,\n int(location_obj.zipcode)))\n rs = run_query('''select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s '''\n , (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,\n int(location_obj.zipcode)))\n return 
rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query('''insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'''\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query('''select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'''\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query('''insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'''\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))\n rs = run_query(\n '''select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'''\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_individual(individual_obj):\n run_query(\n '''insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)'''\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,\n individual_obj.cust_insurpolnum, individual_obj.cust_type))\n rs = run_query(\n '''select * from zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s'''\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,\n individual_obj.cust_insurpolnum, individual_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_invoice(invoice_obj):\n run_query('''insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) '''\n , (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query('''select * from zlrz_invoice where inv_date = %s and inv_amount = %s'''\n , (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\ndef insert_payment(payment_obj):\n run_query('''insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount) \n values (%s, %s , %s , %s , %s) '''\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id\n , payment_obj.pay_amount))\n rs = run_query('''select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s\n and pay_amount=%s'''\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id\n , payment_obj.pay_amount))\n return rs[0][0]\n\n\ndef insert_rental(rental_obj):\n run_query('''insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) '''\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer\n , rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id\n , rental_obj.cust_type, rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.ren_dropoffloc_id\n , rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query('''select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s'''\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer\n , rental_obj.ren_endodometer, rental_obj.ren_dailylimit, 
rental_obj.cust_id\n , rental_obj.cust_type, rental_obj.veh_id, rental_obj.ren_pickuplocid,\n rental_obj.ren_dropoffloc_id\n , rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query('''insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '''\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n '''select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'''\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n else:\n rs = run_query(\n '''select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'''\n , (coupon_obj.cou_rate))\n return rs[0][0]\n\n\ndef insert_cust_coupon(cust_coupon_obj):\n run_query('''insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) '''\n ,\n (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj.cust_type, cust_coupon_obj.coupon_type))\n return\n\n\ndef get_password(username):\n rs = run_query('''select password from zlrz_customer where username = %s''', (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_type(username):\n rs = run_query('''select cust_type from zlrz_customer where username = %s''', (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('''select cust_id from zlrz_customer where username = %s''', (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_all_corporations():\n rs = run_query('''select * from zlrz_corporation''')\n return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2], t[0]), rs))\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s''', (cust_id))\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n\n\ndef get_coupon(cust_id):\n rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s'''\n , (cust_id,))\n res = None\n maxrate = float('-inf')\n if rs is not None:\n coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n for cou in coupons:\n if cou.validstart and cou.validend:\n if (datetime.now() - cou.validstart).days >= 0 and (cou.validend - datetime.now()).days >= 0:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n if not cou.validstart and not cou.validend:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n return res\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_vehicle''')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('''select * from zlrz_customer''')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('''select * from zlrz_corporate''')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('''select * from zlrz_individual''')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3], t[4]), rs))\n\n\ndef get_all_vehicles():\n rs = 
run_query('''select * from zlrz_vehicle''')\n return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_office_location where ol_id = %s''', (location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_vehicle_class''')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))\n\n\ndef get_vehicle_by_id(vehicle_id):\n rs = run_query('''select * from zlrz_vehicle where veh_id=%s''', (int(vehicle_id),))\n return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))[0] \\\n if rs is not None else None\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query('''select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s''', (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0] if rs is not None else None\n\n\ndef delete_veh_class(vc_num):\n if vc_num == '':\n return\n res = run_query('''select * from zlrz_vehicle where vc_num=%s''', (int(vc_num)))\n if res:\n return 1\n else:\n rs = run_query('''delete from zlrz_vehicle_class where vc_num=%s''', (int(vc_num)))\n return rs\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('''select * from zlrz_office_location where ol_id=%s''', (int(location_id)))\n if res:\n return 1\n else:\n rs = run_query('''delete from zlrz_office_location where ol_id=%s''', (int(location_id)))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('''delete from zlrz_vehicle where veh_id=%s''', (int(veh_id)))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('''delete from zlrz_rental where cust_id=%s''', (int(cust_id)))\n rs4 = run_query('''delete from zlrz_cust_coupon where cust_id=%s''', (int(cust_id)))\n rs2 = run_query('''delete from zlrz_corporate where cust_id=%s''', (int(cust_id)))\n rs3 = run_query('''delete from zlrz_individual where cust_id=%s''', (int(cust_id)))\n rs1 = run_query('''delete from zlrz_customer where cust_id=%s''', (int(cust_id)))\n return rs1\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('''delete from zlrz_cust_coupon where cou_id=%s''', (int(cou_id)))\n rs2 = run_query('''delete from zlrz_coupons where cou_id=%s''', (int(cou_id)))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('''select * from zlrz_corporation where corp_id=%s''', (int(corp_id)))\n if res:\n return 1\n else:\n rs = run_query('''delete from zlrz_corporation where corp_id=%s''', (int(corp_id)))\n return rs\n\ndef update_vehicle_class(class_obj):\n rs = run_query('''update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s''', (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile), class_obj.vc_name))\n return rs",
"step-ids": [
25,
27,
37,
40,
41
]
}
|
[
25,
27,
37,
40,
41
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
from torchvision.utils import make_grid
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from PIL import Image
from IPython.display import display
import warnings
warnings.filterwarnings('ignore')
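# NOTE: with Resize/CenterCrop commented out below, the network's fixed
# 54*54*16 flatten size assumes the dataset images are already 224x224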
train_transform = transforms.Compose([
# transforms.RandomRotation(10),
# transforms.RandomHorizontalFlip(),
# transforms.Resize(224),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
dataset = datasets.ImageFolder('shapes_dataset_LR',transform=train_transform)
torch.manual_seed(42)
train_data, test_data = torch.utils.data.random_split(dataset, [9000, 1000])
class_names = dataset.classes
train_loader = DataLoader(train_data, batch_size = 10, shuffle = True)
test_loader = DataLoader(test_data, batch_size = 10)
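# grab a single batch from the training loader just to visualize it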
for images, labels in train_loader:
break
im = make_grid(images, nrow=5)
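# invert the earlier Normalize so the grid displays in natural colors:
# since x_norm = (x - mean) / std, the inverse uses mean' = -mean/std and std' = 1/std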
inv_normalize = transforms.Normalize(
    mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]
)
im_inv = inv_normalize(im)
print(labels)
plt.figure(figsize=(12,4))
plt.imshow(np.transpose(im_inv.numpy(), (1,2,0)))
plt.show()
class ConvolutionalNetwork(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 3, 1)
self.conv2 = nn.Conv2d(6, 16, 3, 1)
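        # flatten size 54*54*16: a 224x224 input shrinks 224 -> 222 -> 111 -> 109 -> 54
        # through the two (3x3 conv -> 2x2 max-pool) stages above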
self.fc1 = nn.Linear(54 * 54 * 16, 120)
self.fc2 = nn.Linear(120, 84)
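        # two output units here; this must match the number of class folders
        # that ImageFolder found (len(class_names))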
self.fc3 = nn.Linear(84, 2)
def forward(self, X):
X = F.relu(self.conv1(X))
X = F.max_pool2d(X, 2, 2)
X = F.relu(self.conv2(X))
X = F.max_pool2d(X, 2, 2)
X = X.view(-1, 54 * 54 * 16)
X = F.relu(self.fc1(X))
X = F.relu(self.fc2(X))
X = self.fc3(X)
return F.log_softmax(X, dim=1)
torch.manual_seed(101)
CNNmodel = ConvolutionalNetwork()
# the model's forward ends in log_softmax, so NLLLoss is the matching loss
# (CrossEntropyLoss would apply log_softmax a second time)
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(CNNmodel.parameters(), lr=0.001)
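# A minimal training-loop sketch using the loaders, model, loss and optimizer
# defined above; the epoch count is an illustrative choice.
epochs = 3
for epoch in range(epochs):
    running_loss = 0.0
    for X_train, y_train in train_loader:
        optimizer.zero_grad()
        y_pred = CNNmodel(X_train)         # log-probabilities, shape (batch, 2)
        loss = criterion(y_pred, y_train)  # NLLLoss over log-probabilities
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f'epoch {epoch + 1}/{epochs}  mean loss {running_loss / len(train_loader):.4f}')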
# # to count each class in validation set
# arr = np.array(np.array(dataset.imgs)[test_data.indices, 1], dtype=int)
# cnt = np.zeros((6,1), dtype = int)
# for i in range(1000):
# for j in range(6):
# if arr[i] == j:
# cnt[j] += 1
# break
# print(cnt)
# for reproducible results
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
#The compose function allows for multiple transforms
#transforms.ToTensor() converts our PILImage to a tensor of shape (C x H x W) in the range [0,1]
#transforms.Normalize(mean,std) normalizes a tensor to a (mean, std) for (R, G, B)
# transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
#
# train_set = torchvision.datasets.CIFAR10(root='./cifardata', train=True, download=True, transform=transform)
#
# test_set = torchvision.datasets.CIFAR10(root='./cifardata', train=False, download=True, transform=transform)
#
# classes = ('0', '1', '2', '3', '4', '5')
# x = torch.rand(5, 3)
# print(x)
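# A minimal evaluation sketch, assuming the training loop above has run:
# accuracy on the 1000 held-out images. eval() is a no-op for this model
# (no dropout/batchnorm) but is good practice before inference.
CNNmodel.eval()
correct = 0
with torch.no_grad():
    for X_test, y_test in test_loader:
        preds = CNNmodel(X_test).argmax(dim=1)
        correct += (preds == y_test).sum().item()
print(f'test accuracy: {correct / len(test_data):.3f}')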
|
normal
|
{
"blob_id": "7821b07a49db9f3f46bedc30f2271160e281806f",
"index": 4814,
"step-1": "<mask token>\n\n\nclass ConvolutionalNetwork(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 3, 1)\n self.conv2 = nn.Conv2d(6, 16, 3, 1)\n self.fc1 = nn.Linear(54 * 54 * 16, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 2)\n\n def forward(self, X):\n X = F.relu(self.conv1(X))\n X = F.max_pool2d(X, 2, 2)\n X = F.relu(self.conv2(X))\n X = F.max_pool2d(X, 2, 2)\n X = X.view(-1, 54 * 54 * 16)\n X = F.relu(self.fc1(X))\n X = F.relu(self.fc2(X))\n X = self.fc3(X)\n return F.log_softmax(X, dim=1)\n\n\n<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\ntorch.manual_seed(42)\n<mask token>\nfor images, labels in train_loader:\n break\n<mask token>\nprint(labels)\nplt.figure(figsize=(12, 4))\nplt.imshow(np.transpose(im_inv.numpy(), (1, 2, 0)))\nplt.show()\n\n\nclass ConvolutionalNetwork(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 3, 1)\n self.conv2 = nn.Conv2d(6, 16, 3, 1)\n self.fc1 = nn.Linear(54 * 54 * 16, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 2)\n\n def forward(self, X):\n X = F.relu(self.conv1(X))\n X = F.max_pool2d(X, 2, 2)\n X = F.relu(self.conv2(X))\n X = F.max_pool2d(X, 2, 2)\n X = X.view(-1, 54 * 54 * 16)\n X = F.relu(self.fc1(X))\n X = F.relu(self.fc2(X))\n X = self.fc3(X)\n return F.log_softmax(X, dim=1)\n\n\ntorch.manual_seed(101)\n<mask token>\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\ntrain_transform = transforms.Compose([transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\ndataset = datasets.ImageFolder('shapes_dataset_LR', transform=train_transform)\ntorch.manual_seed(42)\ntrain_data, test_data = torch.utils.data.random_split(dataset, [9000, 1000])\nclass_names = dataset.classes\ntrain_loader = DataLoader(train_data, batch_size=10, shuffle=True)\ntest_loader = DataLoader(test_data, batch_size=10)\nfor images, labels in train_loader:\n break\nim = make_grid(images, nrow=5)\ninv_normalize = transforms.Normalize(mean=[-0.485 / 0.229, -0.486 / 0.224, \n -0.406 / 0.225], std=[1 / 0.229, 1 / 0.224, 1 / 0.225])\nim_inv = inv_normalize(im)\nprint(labels)\nplt.figure(figsize=(12, 4))\nplt.imshow(np.transpose(im_inv.numpy(), (1, 2, 0)))\nplt.show()\n\n\nclass ConvolutionalNetwork(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 3, 1)\n self.conv2 = nn.Conv2d(6, 16, 3, 1)\n self.fc1 = nn.Linear(54 * 54 * 16, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 2)\n\n def forward(self, X):\n X = F.relu(self.conv1(X))\n X = F.max_pool2d(X, 2, 2)\n X = F.relu(self.conv2(X))\n X = F.max_pool2d(X, 2, 2)\n X = X.view(-1, 54 * 54 * 16)\n X = F.relu(self.fc1(X))\n X = F.relu(self.fc2(X))\n X = self.fc3(X)\n return F.log_softmax(X, dim=1)\n\n\ntorch.manual_seed(101)\nCNNmodel = ConvolutionalNetwork()\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(CNNmodel.parameters(), lr=0.001)\nseed = 42\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms, models\nfrom torchvision.utils import make_grid\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nfrom PIL import Image\nfrom IPython.display import display\nimport warnings\nwarnings.filterwarnings('ignore')\ntrain_transform = transforms.Compose([transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\ndataset = datasets.ImageFolder('shapes_dataset_LR', transform=train_transform)\ntorch.manual_seed(42)\ntrain_data, test_data = torch.utils.data.random_split(dataset, [9000, 1000])\nclass_names = dataset.classes\ntrain_loader = DataLoader(train_data, batch_size=10, shuffle=True)\ntest_loader = DataLoader(test_data, batch_size=10)\nfor images, labels in train_loader:\n break\nim = make_grid(images, nrow=5)\ninv_normalize = transforms.Normalize(mean=[-0.485 / 0.229, -0.486 / 0.224, \n -0.406 / 0.225], std=[1 / 0.229, 1 / 0.224, 1 / 0.225])\nim_inv = inv_normalize(im)\nprint(labels)\nplt.figure(figsize=(12, 4))\nplt.imshow(np.transpose(im_inv.numpy(), (1, 2, 0)))\nplt.show()\n\n\nclass ConvolutionalNetwork(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 3, 1)\n self.conv2 = nn.Conv2d(6, 16, 3, 1)\n self.fc1 = nn.Linear(54 * 54 * 16, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 2)\n\n def forward(self, X):\n X = F.relu(self.conv1(X))\n X = F.max_pool2d(X, 2, 2)\n X = F.relu(self.conv2(X))\n X = F.max_pool2d(X, 2, 2)\n X = X.view(-1, 54 * 54 * 16)\n X = F.relu(self.fc1(X))\n X = F.relu(self.fc2(X))\n X = self.fc3(X)\n return F.log_softmax(X, dim=1)\n\n\ntorch.manual_seed(101)\nCNNmodel = ConvolutionalNetwork()\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(CNNmodel.parameters(), lr=0.001)\nseed = 42\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n",
"step-5": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import datasets, transforms, models\r\nfrom torchvision.utils import make_grid\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom PIL import Image\r\nfrom IPython.display import display\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\ntrain_transform = transforms.Compose([\r\n # transforms.RandomRotation(10),\r\n # transforms.RandomHorizontalFlip(),\r\n # transforms.Resize(224),\r\n # transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n])\r\n\r\ndataset = datasets.ImageFolder('shapes_dataset_LR',transform=train_transform)\r\ntorch.manual_seed(42)\r\ntrain_data, test_data = torch.utils.data.random_split(dataset, [9000, 1000])\r\n\r\nclass_names = dataset.classes\r\n\r\ntrain_loader = DataLoader(train_data, batch_size = 10, shuffle = True)\r\ntest_loader = DataLoader(test_data, batch_size = 10)\r\n\r\nfor images, labels in train_loader:\r\n break\r\n\r\nim = make_grid(images, nrow=5)\r\n\r\ninv_normalize = transforms.Normalize(\r\n mean=[-0.485/0.229, -0.486/0.224, -0.406/0.225],\r\n std=[1/0.229, 1/0.224, 1/0.225]\r\n)\r\n\r\nim_inv = inv_normalize(im)\r\nprint(labels)\r\nplt.figure(figsize=(12,4))\r\nplt.imshow(np.transpose(im_inv.numpy(), (1,2,0)))\r\nplt.show()\r\n\r\n\r\nclass ConvolutionalNetwork(nn.Module):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.conv1 = nn.Conv2d(3, 6, 3, 1)\r\n self.conv2 = nn.Conv2d(6, 16, 3, 1)\r\n self.fc1 = nn.Linear(54 * 54 * 16, 120)\r\n self.fc2 = nn.Linear(120, 84)\r\n self.fc3 = nn.Linear(84, 2)\r\n\r\n def forward(self, X):\r\n X = F.relu(self.conv1(X))\r\n X = F.max_pool2d(X, 2, 2)\r\n X = F.relu(self.conv2(X))\r\n X = F.max_pool2d(X, 2, 2)\r\n X = X.view(-1, 54 * 54 * 16)\r\n X = F.relu(self.fc1(X))\r\n X = F.relu(self.fc2(X))\r\n X = self.fc3(X)\r\n\r\n return F.log_softmax(X, dim=1)\r\n\r\ntorch.manual_seed(101)\r\nCNNmodel = ConvolutionalNetwork()\r\ncriterion = nn.CrossEntropyLoss()\r\noptimizer = torch.optim.Adam(CNNmodel.parameters(), lr=0.001)\r\n\r\n\r\n\r\n# # to count each class in validation set\r\n# arr = np.array(np.array(dataset.imgs)[test_data.indices, 1], dtype=int)\r\n# cnt = np.zeros((6,1), dtype = int)\r\n# for i in range(1000):\r\n# for j in range(6):\r\n# if arr[i] == j:\r\n# cnt[j] += 1\r\n# break\r\n# print(cnt)\r\n\r\n\r\n# for reproducable results\r\nseed = 42\r\nnp.random.seed(seed)\r\ntorch.manual_seed(seed)\r\n\r\n\r\n#The compose function allows for multiple transforms\r\n#transforms.ToTensor() converts our PILImage to a tensor of shape (C x H x W) in the range [0,1]\r\n#transforms.Normalize(mean,std) normalizes a tensor to a (mean, std) for (R, G, B)\r\n# transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\r\n#\r\n# train_set = torchvision.datasets.CIFAR10(root='./cifardata', train=True, download=True, transform=transform)\r\n#\r\n# test_set = torchvision.datasets.CIFAR10(root='./cifardata', train=False, download=True, transform=transform)\r\n#\r\n# classes = ('0', '1', '2', '3', '4', '5')\r\n\r\n# x = torch.rand(5, 3)\r\n# print(x)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
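The CNN record above wires up the model, loss, and optimizer but never runs a training step. A minimal epoch loop over its own train_loader could look like the sketch below; the epoch count and print cadence are assumptions, not part of the record.

epochs = 3  # assumed value; the original sample never sets one
for epoch in range(epochs):
    for X_train, y_train in train_loader:
        optimizer.zero_grad()
        y_pred = CNNmodel(X_train)         # log-probabilities (model ends in log_softmax)
        loss = criterion(y_pred, y_train)  # nn.NLLLoss would pair exactly with log_softmax
        loss.backward()
        optimizer.step()
    print(f'epoch {epoch}  last-batch loss {loss.item():.4f}')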
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for c in range(0, 7):
    pe1 = int(input('Enter the year of birth: '))
pe1 = 2019 - pe1
if pe1 >= 21:
num1 = num1 + 1
print(f'Among the 7 people, {num1} are of legal age.')
<|reserved_special_token_1|>
num1 = 0
for c in range(0, 7):
    pe1 = int(input('Enter the year of birth: '))
pe1 = 2019 - pe1
if pe1 >= 21:
num1 = num1 + 1
print(f'Among the 7 people, {num1} are of legal age.')
<|reserved_special_token_1|>
# Shows how many of the 7 people are of legal age.
num1 = 0
for c in range(0,7):
    pe1 = int(input('Enter the year of birth: '))
pe1 = 2019 - pe1
if pe1 >= 21:
num1 = num1 + 1
print(f'Among the 7 people, {num1} are of legal age.')
|
flexible
|
{
"blob_id": "251d589a5815d77d2bc375d8d4a7d41e79a2a5cd",
"index": 5303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c in range(0, 7):\n pe1 = int(input('Digite o ano de nascimento: '))\n pe1 = 2019 - pe1\n if pe1 >= 21:\n num1 = num1 + 1\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')\n",
"step-3": "num1 = 0\nfor c in range(0, 7):\n pe1 = int(input('Digite o ano de nascimento: '))\n pe1 = 2019 - pe1\n if pe1 >= 21:\n num1 = num1 + 1\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')\n",
"step-4": "# Mostra entre as 7 pessoas, quantas pessoas são maiores de idade.\r\n\r\n\r\nnum1 = 0\r\nfor c in range(0,7):\r\n pe1 = int(input('Digite o ano de nascimento: '))\r\n pe1 = 2019 - pe1\r\n if pe1 >= 21:\r\n num1 = num1 + 1\r\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
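The same count can be written in one pass; a condensed sketch, keeping the record's fixed 2019 reference year and its >= 21 cutoff.

adults = sum(2019 - int(input('Enter the year of birth: ')) >= 21 for _ in range(7))
print(f'Among the 7 people, {adults} are of legal age.')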
<|reserved_special_token_0|>
def split_page(result_obj):
"""
    Pagination module: the backend just passes in a paginated result set.
:param result_obj:
:return:
"""
return_str = '<nav>'
return_str += "<ul class='pagination pull-right'>"
if result_obj.has_previous():
return_str += '<li>'
return_str += "<a href='?page=" + str(result_obj.previous_page_number()
) + "' aria-label='Previous'>"
return_str += "<span aria-hidden='true'>«</span>"
return_str += '</a></li>'
for i in result_obj.paginator.page_range:
hide_page_num = abs(result_obj.number - i)
if hide_page_num <= 3:
return_str += '<li '
if i == result_obj.number:
return_str += "class='active'><a href='?page=" + str(i
) + "'>" + str(i) + '</a></li>'
else:
return_str += "><a href='?page=" + str(i) + "'>" + str(i
) + '</a></li>'
if result_obj.has_next():
return_str += "<li><a href='?page=" + str(result_obj.next_page_number()
) + "' aria-label='Next'>"
return_str += (
"<span aria-hidden='true'>»</span></a></li></ul></nav>")
return return_str
@register.simple_tag
def test(string):
return string
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register.simple_tag
def website_title():
return settings.WEBSITE_TITLE
def split_page(result_obj):
"""
    Pagination module: the backend just passes in a paginated result set.
:param result_obj:
:return:
"""
return_str = '<nav>'
return_str += "<ul class='pagination pull-right'>"
if result_obj.has_previous():
return_str += '<li>'
return_str += "<a href='?page=" + str(result_obj.previous_page_number()
) + "' aria-label='Previous'>"
return_str += "<span aria-hidden='true'>«</span>"
return_str += '</a></li>'
for i in result_obj.paginator.page_range:
hide_page_num = abs(result_obj.number - i)
if hide_page_num <= 3:
return_str += '<li '
if i == result_obj.number:
return_str += "class='active'><a href='?page=" + str(i
) + "'>" + str(i) + '</a></li>'
else:
return_str += "><a href='?page=" + str(i) + "'>" + str(i
) + '</a></li>'
if result_obj.has_next():
return_str += "<li><a href='?page=" + str(result_obj.next_page_number()
) + "' aria-label='Next'>"
return_str += (
"<span aria-hidden='true'>»</span></a></li></ul></nav>")
return return_str
@register.simple_tag
def test(string):
return string
<|reserved_special_token_1|>
<|reserved_special_token_0|>
register = template.Library()
@register.simple_tag
def website_title():
return settings.WEBSITE_TITLE
def split_page(result_obj):
"""
    Pagination module: the backend just passes in a paginated result set.
:param result_obj:
:return:
"""
return_str = '<nav>'
return_str += "<ul class='pagination pull-right'>"
if result_obj.has_previous():
return_str += '<li>'
return_str += "<a href='?page=" + str(result_obj.previous_page_number()
) + "' aria-label='Previous'>"
return_str += "<span aria-hidden='true'>«</span>"
return_str += '</a></li>'
for i in result_obj.paginator.page_range:
hide_page_num = abs(result_obj.number - i)
if hide_page_num <= 3:
return_str += '<li '
if i == result_obj.number:
return_str += "class='active'><a href='?page=" + str(i
) + "'>" + str(i) + '</a></li>'
else:
return_str += "><a href='?page=" + str(i) + "'>" + str(i
) + '</a></li>'
if result_obj.has_next():
return_str += "<li><a href='?page=" + str(result_obj.next_page_number()
) + "' aria-label='Next'>"
return_str += (
"<span aria-hidden='true'>»</span></a></li></ul></nav>")
return return_str
@register.simple_tag
def test(string):
return string
<|reserved_special_token_1|>
from django import template
from django.conf import settings
from django.utils.html import format_html
register = template.Library()
@register.simple_tag
def website_title():
return settings.WEBSITE_TITLE
def split_page(result_obj):
"""
    Pagination module: the backend just passes in a paginated result set.
:param result_obj:
:return:
"""
return_str = '<nav>'
return_str += "<ul class='pagination pull-right'>"
if result_obj.has_previous():
return_str += '<li>'
return_str += "<a href='?page=" + str(result_obj.previous_page_number()
) + "' aria-label='Previous'>"
return_str += "<span aria-hidden='true'>«</span>"
return_str += '</a></li>'
for i in result_obj.paginator.page_range:
hide_page_num = abs(result_obj.number - i)
if hide_page_num <= 3:
return_str += '<li '
if i == result_obj.number:
return_str += "class='active'><a href='?page=" + str(i
) + "'>" + str(i) + '</a></li>'
else:
return_str += "><a href='?page=" + str(i) + "'>" + str(i
) + '</a></li>'
if result_obj.has_next():
return_str += "<li><a href='?page=" + str(result_obj.next_page_number()
) + "' aria-label='Next'>"
return_str += (
"<span aria-hidden='true'>»</span></a></li></ul></nav>")
return return_str
@register.simple_tag
def test(string):
return string
<|reserved_special_token_1|>
#!/usr/bin/env python
from django import template
from django.conf import settings
from django.utils.html import format_html
register = template.Library()
@register.simple_tag
def website_title():
return settings.WEBSITE_TITLE
def split_page(result_obj):
"""
    Pagination module: the backend just passes in a paginated result set.
:param result_obj:
:return:
"""
return_str = "<nav>"
return_str += "<ul class='pagination pull-right'>"
if result_obj.has_previous():
return_str += "<li>"
return_str += "<a href='?page=" + str(result_obj.previous_page_number()) + "' aria-label='Previous'>"
return_str += "<span aria-hidden='true'>«</span>"
return_str += "</a></li>"
for i in result_obj.paginator.page_range:
# print(i,result_obj.paginator.page_range,result_obj.number)
hide_page_num = abs(result_obj.number - i)
        if hide_page_num <= 3: # 3 = how many page links to show on each side of the current page
return_str += "<li "
if i == result_obj.number:
return_str += "class='active'><a href='?page=" + str(i) + "'>" + str(i) + "</a></li>"
else:
return_str += "><a href='?page=" + str(i) + "'>" + str(i) + "</a></li>"
if result_obj.has_next():
return_str += "<li><a href='?page=" + str(result_obj.next_page_number()) + "' aria-label='Next'>"
return_str += "<span aria-hidden='true'>»</span></a></li></ul></nav>"
#return format_html(return_str)
return return_str
@register.simple_tag
def test(string):
return string
|
flexible
|
{
"blob_id": "c2c51dcd05c21e91e591de25fc2de034c88c48a1",
"index": 9052,
"step-1": "<mask token>\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\n@register.simple_tag\ndef test(string):\n return string\n",
"step-2": "<mask token>\n\n\n@register.simple_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\n@register.simple_tag\ndef test(string):\n return string\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\n@register.simple_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\n@register.simple_tag\ndef test(string):\n return string\n",
"step-4": "from django import template\nfrom django.conf import settings\nfrom django.utils.html import format_html\nregister = template.Library()\n\n\n@register.simple_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = '<nav>'\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += '<li>'\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()\n ) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += '</a></li>'\n for i in result_obj.paginator.page_range:\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3:\n return_str += '<li '\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i\n ) + \"'>\" + str(i) + '</a></li>'\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i\n ) + '</a></li>'\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()\n ) + \"' aria-label='Next'>\"\n return_str += (\n \"<span aria-hidden='true'>»</span></a></li></ul></nav>\")\n return return_str\n\n\n@register.simple_tag\ndef test(string):\n return string\n",
"step-5": "#!/usr/bin/env python\nfrom django import template\nfrom django.conf import settings\nfrom django.utils.html import format_html\n\n\nregister = template.Library()\n\n@register.simple_tag\ndef website_title():\n return settings.WEBSITE_TITLE\n\n\ndef split_page(result_obj):\n \"\"\"\n 分页模块,后台传入一个分页结果集就可以\n :param result_obj:\n :return:\n \"\"\"\n return_str = \"<nav>\"\n return_str += \"<ul class='pagination pull-right'>\"\n if result_obj.has_previous():\n return_str += \"<li>\"\n return_str += \"<a href='?page=\" + str(result_obj.previous_page_number()) + \"' aria-label='Previous'>\"\n return_str += \"<span aria-hidden='true'>«</span>\"\n return_str += \"</a></li>\"\n\n for i in result_obj.paginator.page_range:\n # print(i,result_obj.paginator.page_range,result_obj.number)\n hide_page_num = abs(result_obj.number - i)\n if hide_page_num <= 3: # 3为当前页前后显示多少个\n return_str += \"<li \"\n if i == result_obj.number:\n return_str += \"class='active'><a href='?page=\" + str(i) + \"'>\" + str(i) + \"</a></li>\"\n else:\n return_str += \"><a href='?page=\" + str(i) + \"'>\" + str(i) + \"</a></li>\"\n\n if result_obj.has_next():\n return_str += \"<li><a href='?page=\" + str(result_obj.next_page_number()) + \"' aria-label='Next'>\"\n return_str += \"<span aria-hidden='true'>»</span></a></li></ul></nav>\"\n\n #return format_html(return_str)\n return return_str\n\n\n@register.simple_tag\ndef test(string):\n return string\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
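Note that split_page above is a plain function (only website_title and test are registered as tags) and its format_html import goes unused. A hedged sketch of how a view might feed it a Django Page object follows; the view and template names are illustrative, not from the record.

from django.core.paginator import Paginator
from django.shortcuts import render

def article_list(request):
    items = list(range(1, 101))  # stand-in for a real queryset
    page_obj = Paginator(items, 10).get_page(request.GET.get('page', 1))
    # A django Page supplies the has_previous/has_next/number/paginator members
    # that split_page reads; the template should mark the returned HTML as safe.
    return render(request, 'article_list.html', {'pager': split_page(page_obj)})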
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 6 10:05:25 2019
@author: MCA
"""
import smtplib, ssl
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
import os,sys
import time
def loadFiles(subdir, filetype):
"""
example:
dirs = ["dir1", "dir2"]
file_type = ".dat"
    files = loadFiles(dirs[0], file_type)
"""
dirname = os.path.dirname(__file__)
path = os.path.join(dirname, (subdir+"/"))
files_path = []
fileNamesFiltered = []
for root, dirs, files in os.walk(path):
for i, filename in enumerate(files):
if filename[(len(filename))-len(filetype):] == filetype:
# print(filename)
filename_path = path + filename
files_path.append(filename_path)
fileNamesFiltered.append(filename)
return fileNamesFiltered
def sendMail(filename):
smtp_server = "smtp.seznam.cz"
port = 25 # For starttls
sender_email = "xxx@email.cz"
#password = input("Type your password and press enter: ")
password = "xxxx"
# Create a secure SSL context
context = ssl.create_default_context()
receiver_email=sender_email
#compose an email
message = MIMEMultipart("alternative")
message["Subject"] = ("analysis status check: "+ str(filename))
message["From"] = sender_email
message["To"] = receiver_email
text = "analysis status check"
part1 = MIMEText(text, "plain")
message.attach(part1)
#send file
# filename = file
try:
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
file = MIMEBase("application", "octet-stream")
file.set_payload(attachment.read())
encoders.encode_base64(file)
file.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
message.attach(file)
except:
print("file not found")
    # Try to log in to server and send email
    server = None  # so the finally clause is safe if SMTP() itself raises
    try:
server = smtplib.SMTP(smtp_server,port)
server.ehlo() # Can be omitted
server.starttls(context=context) # Secure the connection
server.ehlo() # Can be omitted
server.login(sender_email, password)
print("logged in")
# TODO: Send email here
server.sendmail(sender_email, receiver_email, message.as_string())
print("mail sent")
except Exception as e:
# Print any error messages to stdout
print(e)
    finally:
        if server is not None:
            server.quit()
#--------------------------------------------------------------------------------------
if __name__ == "__main__":
run = True
directory = "/folder/folder"
fileType = ".xxx"
name = "xxxxxx_xxx__xxx.xxx"
while run == True:
names = loadFiles(directory, fileType)
print("running")
if name in names:
print("file found:", name)
f = open(name, "r")
for line in f:
if "THE ANALYSIS HAS" in line:
sendMail(name)
print("file sent")
run = False
print("done")
sys.exit()
time.sleep(300)
|
normal
|
{
"blob_id": "b310c35b781e3221e2dacc7717ed77e20001bafa",
"index": 5109,
"step-1": "<mask token>\n\n\ndef loadFiles(subdir, filetype):\n \"\"\"\n example:\n dirs = [\"dir1\", \"dir2\"]\n file_type = \".dat\"\n files, keys, data = loadFiles(dirs[0], file_type)\n \n \"\"\"\n dirname = os.path.dirname(__file__)\n path = os.path.join(dirname, subdir + '/')\n files_path = []\n fileNamesFiltered = []\n for root, dirs, files in os.walk(path):\n for i, filename in enumerate(files):\n if filename[len(filename) - len(filetype):] == filetype:\n filename_path = path + filename\n files_path.append(filename_path)\n fileNamesFiltered.append(filename)\n return fileNamesFiltered\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadFiles(subdir, filetype):\n \"\"\"\n example:\n dirs = [\"dir1\", \"dir2\"]\n file_type = \".dat\"\n files, keys, data = loadFiles(dirs[0], file_type)\n \n \"\"\"\n dirname = os.path.dirname(__file__)\n path = os.path.join(dirname, subdir + '/')\n files_path = []\n fileNamesFiltered = []\n for root, dirs, files in os.walk(path):\n for i, filename in enumerate(files):\n if filename[len(filename) - len(filetype):] == filetype:\n filename_path = path + filename\n files_path.append(filename_path)\n fileNamesFiltered.append(filename)\n return fileNamesFiltered\n\n\ndef sendMail(filename):\n smtp_server = 'smtp.seznam.cz'\n port = 25\n sender_email = 'xxx@email.cz'\n password = 'xxxx'\n context = ssl.create_default_context()\n receiver_email = sender_email\n message = MIMEMultipart('alternative')\n message['Subject'] = 'analysis status check: ' + str(filename)\n message['From'] = sender_email\n message['To'] = receiver_email\n text = 'analysis status check'\n part1 = MIMEText(text, 'plain')\n message.attach(part1)\n try:\n with open(filename, 'rb') as attachment:\n file = MIMEBase('application', 'octet-stream')\n file.set_payload(attachment.read())\n encoders.encode_base64(file)\n file.add_header('Content-Disposition',\n f'attachment; filename= {filename}')\n message.attach(file)\n except:\n print('file not found')\n try:\n server = smtplib.SMTP(smtp_server, port)\n server.ehlo()\n server.starttls(context=context)\n server.ehlo()\n server.login(sender_email, password)\n print('logged in')\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('mail sent')\n except Exception as e:\n print(e)\n finally:\n server.quit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loadFiles(subdir, filetype):\n \"\"\"\n example:\n dirs = [\"dir1\", \"dir2\"]\n file_type = \".dat\"\n files, keys, data = loadFiles(dirs[0], file_type)\n \n \"\"\"\n dirname = os.path.dirname(__file__)\n path = os.path.join(dirname, subdir + '/')\n files_path = []\n fileNamesFiltered = []\n for root, dirs, files in os.walk(path):\n for i, filename in enumerate(files):\n if filename[len(filename) - len(filetype):] == filetype:\n filename_path = path + filename\n files_path.append(filename_path)\n fileNamesFiltered.append(filename)\n return fileNamesFiltered\n\n\ndef sendMail(filename):\n smtp_server = 'smtp.seznam.cz'\n port = 25\n sender_email = 'xxx@email.cz'\n password = 'xxxx'\n context = ssl.create_default_context()\n receiver_email = sender_email\n message = MIMEMultipart('alternative')\n message['Subject'] = 'analysis status check: ' + str(filename)\n message['From'] = sender_email\n message['To'] = receiver_email\n text = 'analysis status check'\n part1 = MIMEText(text, 'plain')\n message.attach(part1)\n try:\n with open(filename, 'rb') as attachment:\n file = MIMEBase('application', 'octet-stream')\n file.set_payload(attachment.read())\n encoders.encode_base64(file)\n file.add_header('Content-Disposition',\n f'attachment; filename= {filename}')\n message.attach(file)\n except:\n print('file not found')\n try:\n server = smtplib.SMTP(smtp_server, port)\n server.ehlo()\n server.starttls(context=context)\n server.ehlo()\n server.login(sender_email, password)\n print('logged in')\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('mail sent')\n except Exception as e:\n print(e)\n finally:\n server.quit()\n\n\nif __name__ == '__main__':\n run = True\n directory = '/folder/folder'\n fileType = '.xxx'\n name = 'xxxxxx_xxx__xxx.xxx'\n while run == True:\n names = loadFiles(directory, fileType)\n print('running')\n if name in names:\n print('file found:', name)\n f = open(name, 'r')\n for line in f:\n if 'THE ANALYSIS HAS' in line:\n sendMail(name)\n print('file sent')\n run = False\n print('done')\n sys.exit()\n time.sleep(300)\n",
"step-4": "<mask token>\nimport smtplib, ssl\nfrom email import encoders\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nimport os, sys\nimport time\n\n\ndef loadFiles(subdir, filetype):\n \"\"\"\n example:\n dirs = [\"dir1\", \"dir2\"]\n file_type = \".dat\"\n files, keys, data = loadFiles(dirs[0], file_type)\n \n \"\"\"\n dirname = os.path.dirname(__file__)\n path = os.path.join(dirname, subdir + '/')\n files_path = []\n fileNamesFiltered = []\n for root, dirs, files in os.walk(path):\n for i, filename in enumerate(files):\n if filename[len(filename) - len(filetype):] == filetype:\n filename_path = path + filename\n files_path.append(filename_path)\n fileNamesFiltered.append(filename)\n return fileNamesFiltered\n\n\ndef sendMail(filename):\n smtp_server = 'smtp.seznam.cz'\n port = 25\n sender_email = 'xxx@email.cz'\n password = 'xxxx'\n context = ssl.create_default_context()\n receiver_email = sender_email\n message = MIMEMultipart('alternative')\n message['Subject'] = 'analysis status check: ' + str(filename)\n message['From'] = sender_email\n message['To'] = receiver_email\n text = 'analysis status check'\n part1 = MIMEText(text, 'plain')\n message.attach(part1)\n try:\n with open(filename, 'rb') as attachment:\n file = MIMEBase('application', 'octet-stream')\n file.set_payload(attachment.read())\n encoders.encode_base64(file)\n file.add_header('Content-Disposition',\n f'attachment; filename= {filename}')\n message.attach(file)\n except:\n print('file not found')\n try:\n server = smtplib.SMTP(smtp_server, port)\n server.ehlo()\n server.starttls(context=context)\n server.ehlo()\n server.login(sender_email, password)\n print('logged in')\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('mail sent')\n except Exception as e:\n print(e)\n finally:\n server.quit()\n\n\nif __name__ == '__main__':\n run = True\n directory = '/folder/folder'\n fileType = '.xxx'\n name = 'xxxxxx_xxx__xxx.xxx'\n while run == True:\n names = loadFiles(directory, fileType)\n print('running')\n if name in names:\n print('file found:', name)\n f = open(name, 'r')\n for line in f:\n if 'THE ANALYSIS HAS' in line:\n sendMail(name)\n print('file sent')\n run = False\n print('done')\n sys.exit()\n time.sleep(300)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 6 10:05:25 2019\n\n@author: MCA\n\"\"\"\n\n\nimport smtplib, ssl\n\nfrom email import encoders\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\n\nimport os,sys\nimport time\n\ndef loadFiles(subdir, filetype):\n \"\"\"\n example:\n dirs = [\"dir1\", \"dir2\"]\n file_type = \".dat\"\n files, keys, data = loadFiles(dirs[0], file_type)\n \n \"\"\" \n \n dirname = os.path.dirname(__file__)\n path = os.path.join(dirname, (subdir+\"/\"))\n files_path = []\n fileNamesFiltered = []\n for root, dirs, files in os.walk(path): \n for i, filename in enumerate(files):\n if filename[(len(filename))-len(filetype):] == filetype:\n# print(filename)\n filename_path = path + filename\n files_path.append(filename_path)\n fileNamesFiltered.append(filename)\n \n \n return fileNamesFiltered\n\n\n\ndef sendMail(filename):\n smtp_server = \"smtp.seznam.cz\"\n port = 25 # For starttls\n sender_email = \"xxx@email.cz\"\n #password = input(\"Type your password and press enter: \")\n password = \"xxxx\"\n \n # Create a secure SSL context\n context = ssl.create_default_context()\n receiver_email=sender_email\n \n \n #compose an email\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = (\"analysis status check: \"+ str(filename))\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n \n text = \"analysis status check\"\n part1 = MIMEText(text, \"plain\")\n message.attach(part1)\n \n \n #send file\n# filename = file\n try:\n with open(filename, \"rb\") as attachment:\n # Add file as application/octet-stream\n # Email client can usually download this automatically as attachment\n file = MIMEBase(\"application\", \"octet-stream\")\n file.set_payload(attachment.read())\n encoders.encode_base64(file)\n file.add_header(\n \"Content-Disposition\",\n f\"attachment; filename= {filename}\",\n )\n message.attach(file)\n except:\n print(\"file not found\")\n \n # Try to log in to server and send email\n try:\n server = smtplib.SMTP(smtp_server,port)\n server.ehlo() # Can be omitted\n server.starttls(context=context) # Secure the connection\n server.ehlo() # Can be omitted\n server.login(sender_email, password)\n print(\"logged in\")\n # TODO: Send email here\n server.sendmail(sender_email, receiver_email, message.as_string())\n print(\"mail sent\")\n except Exception as e:\n # Print any error messages to stdout\n print(e)\n finally:\n server.quit() \n\n\n#--------------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\n run = True\n directory = \"/folder/folder\"\n fileType = \".xxx\"\n name = \"xxxxxx_xxx__xxx.xxx\"\n \n while run == True:\n \n names = loadFiles(directory, fileType)\n print(\"running\")\n if name in names:\n print(\"file found:\", name)\n f = open(name, \"r\")\n for line in f:\n if \"THE ANALYSIS HAS\" in line: \n sendMail(name)\n print(\"file sent\")\n run = False\n print(\"done\")\n sys.exit()\n time.sleep(300)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
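The mail-watcher record above hard-codes its SMTP credentials. If you adapt it, reading them from the environment is the usual fix; a minimal sketch, with the variable names assumed rather than taken from the source.

import os

sender_email = os.environ['SMTP_USER']      # assumed names, not from the record
password = os.environ['SMTP_PASSWORD']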
<|reserved_special_token_0|>
class HTTPClient(abc.ABC):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@abc.abstractmethod
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
"""Perform POST request to a defined URL"""
pass
class RequestsHTTPClient(HTTPClient):
"""
Simple wrapper class around requests library, which is used as the
    main engine for each call. Allows better unit testing overall.
"""
def enable_cache(self, **kwargs: Dict[str, Any]) ->None:
requests_cache.install_cache(self.cache_name, backend=self.backend,
expire_after=self.expire_after, allowable_methods=self.
allowable_methods, **kwargs)
self.cache_enabled = True
def disable_cache(self) ->None:
requests_cache.disable_cache()
requests_cache.uninstall_cache()
self.cache_enabled = False
def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.post(url, **kwargs)
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HTTPClient(abc.ABC):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@abc.abstractmethod
def disable_cache(self) ->None:
"""Disable caching"""
pass
@abc.abstractmethod
def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:
"""Perform GET request to a defined URL"""
pass
@abc.abstractmethod
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
"""Perform POST request to a defined URL"""
pass
class RequestsHTTPClient(HTTPClient):
"""
Simple wrapper class around requests library, which is used as the
    main engine for each call. Allows better unit testing overall.
"""
def enable_cache(self, **kwargs: Dict[str, Any]) ->None:
requests_cache.install_cache(self.cache_name, backend=self.backend,
expire_after=self.expire_after, allowable_methods=self.
allowable_methods, **kwargs)
self.cache_enabled = True
def disable_cache(self) ->None:
requests_cache.disable_cache()
requests_cache.uninstall_cache()
self.cache_enabled = False
def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.post(url, **kwargs)
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HTTPClient(abc.ABC):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@abc.abstractmethod
def enable_cache(self, **kwargs: Dict[str, Any]) ->None:
"""Enable caching for each request"""
pass
@abc.abstractmethod
def disable_cache(self) ->None:
"""Disable caching"""
pass
@abc.abstractmethod
def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:
"""Perform GET request to a defined URL"""
pass
@abc.abstractmethod
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
"""Perform POST request to a defined URL"""
pass
class RequestsHTTPClient(HTTPClient):
"""
Simple wrapper class around requests library, which is used as the
    main engine for each call. Allows better unit testing overall.
"""
def enable_cache(self, **kwargs: Dict[str, Any]) ->None:
requests_cache.install_cache(self.cache_name, backend=self.backend,
expire_after=self.expire_after, allowable_methods=self.
allowable_methods, **kwargs)
self.cache_enabled = True
def disable_cache(self) ->None:
requests_cache.disable_cache()
requests_cache.uninstall_cache()
self.cache_enabled = False
def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.post(url, **kwargs)
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import abc
import requests
from typing import Dict, Tuple, Any
from .exceptions import HTTPError, UnexpectedError
import requests_cache
class HTTPClient(abc.ABC):
"""Basic interface class. Allow to define custom HTTP clients giving
stronger contract behaviour
:type cache_name: str
:param cache_name: The name of the cache, corresponds to the name of the
                       sqlite DB on the filesystem if the `backend` is sqlite
or the name of the redis namespace in case of `redis`
backend.
:type backend: str
:param backend: The backend to use, can be either `memory` to use a simple
python dict, `sqlite` to use a sqlite DB on the filesystem
or `redis` for a redis cache
:type expire_after: int
:param expire_after: Define after how many seconds each key in the cache
have to be evicted
:type allowable_methods: Tuple[str]
:param allowable_methods: A tuple of strings defining for which HTTP
methods to apply caching
Also supports `connection` in case of a redis connection on kwargs,
for more info `https://requests-cache.readthedocs.io/en/latest/api.html`
"""
def __init__(self, cache_name: str='', *, backend: str='memory',
expire_after: int=3600, allowable_methods: Tuple[str]=('GET',), **
kwargs):
self.cache_name = cache_name
self.backend = backend
self.expire_after = expire_after
self.allowable_methods = allowable_methods
self.cache_enabled = False
if self.cache_name:
self.enable_cache(**kwargs)
@abc.abstractmethod
def enable_cache(self, **kwargs: Dict[str, Any]) ->None:
"""Enable caching for each request"""
pass
@abc.abstractmethod
def disable_cache(self) ->None:
"""Disable caching"""
pass
@abc.abstractmethod
def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:
"""Perform GET request to a defined URL"""
pass
@abc.abstractmethod
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
"""Perform POST request to a defined URL"""
pass
class RequestsHTTPClient(HTTPClient):
"""
Simple wrapper class around requests library, which is used as the
    main engine for each call. Allows better unit testing overall.
"""
def enable_cache(self, **kwargs: Dict[str, Any]) ->None:
requests_cache.install_cache(self.cache_name, backend=self.backend,
expire_after=self.expire_after, allowable_methods=self.
allowable_methods, **kwargs)
self.cache_enabled = True
def disable_cache(self) ->None:
requests_cache.disable_cache()
requests_cache.uninstall_cache()
self.cache_enabled = False
def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:
try:
response = requests.post(url, **kwargs)
except (requests.exceptions.HTTPError, requests.exceptions.
TooManyRedirects) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
<|reserved_special_token_1|>
"""
pokespeare.http.py
~~~~~~~~~~~~~~~~~~
Contains definitions of custom HTTP clients, allowing for more flexibility on
the library choice
"""
import abc
import requests
from typing import Dict, Tuple, Any
from .exceptions import HTTPError, UnexpectedError
import requests_cache
class HTTPClient(abc.ABC):
"""Basic interface class. Allow to define custom HTTP clients giving
stronger contract behaviour
:type cache_name: str
:param cache_name: The name of the cache, corresponds to the name of the
                       sqlite DB on the filesystem if the `backend` is sqlite
or the name of the redis namespace in case of `redis`
backend.
:type backend: str
:param backend: The backend to use, can be either `memory` to use a simple
python dict, `sqlite` to use a sqlite DB on the filesystem
or `redis` for a redis cache
:type expire_after: int
:param expire_after: Define after how many seconds each key in the cache
have to be evicted
:type allowable_methods: Tuple[str]
:param allowable_methods: A tuple of strings defining for which HTTP
methods to apply caching
Also supports `connection` in case of a redis connection on kwargs,
for more info `https://requests-cache.readthedocs.io/en/latest/api.html`
"""
def __init__(
self,
cache_name: str = "",
*,
backend: str = "memory",
expire_after: int = 3600,
allowable_methods: Tuple[str] = ("GET",),
**kwargs
):
self.cache_name = cache_name
self.backend = backend
self.expire_after = expire_after
self.allowable_methods = allowable_methods
self.cache_enabled = False
if self.cache_name:
self.enable_cache(**kwargs)
@abc.abstractmethod
def enable_cache(self, **kwargs: Dict[str, Any]) -> None:
"""Enable caching for each request"""
pass
@abc.abstractmethod
def disable_cache(self) -> None:
"""Disable caching"""
pass
@abc.abstractmethod
def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:
"""Perform GET request to a defined URL"""
pass
@abc.abstractmethod
def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:
"""Perform POST request to a defined URL"""
pass
class RequestsHTTPClient(HTTPClient):
"""
Simple wrapper class around requests library, which is used as the
    main engine for each call. Allows better unit testing overall.
"""
def enable_cache(self, **kwargs: Dict[str, Any]) -> None:
requests_cache.install_cache(
self.cache_name,
backend=self.backend,
expire_after=self.expire_after,
allowable_methods=self.allowable_methods,
**kwargs
)
self.cache_enabled = True
def disable_cache(self) -> None:
requests_cache.disable_cache()
requests_cache.uninstall_cache()
self.cache_enabled = False
def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
except (
requests.exceptions.HTTPError,
requests.exceptions.TooManyRedirects,
) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:
try:
response = requests.post(url, **kwargs)
except (
requests.exceptions.HTTPError,
requests.exceptions.TooManyRedirects,
) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
|
flexible
|
{
"blob_id": "1a126ba7e73eb2e7811ab32146fe5aee6c6b30f9",
"index": 4290,
"step-1": "<mask token>\n\n\nclass HTTPClient(abc.ABC):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-2": "<mask token>\n\n\nclass HTTPClient(abc.ABC):\n <mask token>\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def disable_cache(self) ->None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-3": "<mask token>\n\n\nclass HTTPClient(abc.ABC):\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n \"\"\"Enable caching for each request\"\"\"\n pass\n\n @abc.abstractmethod\n def disable_cache(self) ->None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-4": "<mask token>\nimport abc\nimport requests\nfrom typing import Dict, Tuple, Any\nfrom .exceptions import HTTPError, UnexpectedError\nimport requests_cache\n\n\nclass HTTPClient(abc.ABC):\n \"\"\"Basic interface class. Allow to define custom HTTP clients giving\n stronger contract behaviour\n\n :type cache_name: str\n :param cache_name: The name of the cache, corresponds to the name of the\n sqlite DB on the filesystem if the `beckend` is sqlite\n or the name of the redis namespace in case of `redis`\n backend.\n\n :type backend: str\n :param backend: The backend to use, can be either `memory` to use a simple\n python dict, `sqlite` to use a sqlite DB on the filesystem\n or `redis` for a redis cache\n\n :type expire_after: int\n :param expire_after: Define after how many seconds each key in the cache\n have to be evicted\n\n :type allowable_methods: Tuple[str]\n :param allowable_methods: A tuple of strings defining for which HTTP\n methods to apply caching\n\n Also supports `connection` in case of a redis connection on kwargs,\n for more info `https://requests-cache.readthedocs.io/en/latest/api.html`\n \"\"\"\n\n def __init__(self, cache_name: str='', *, backend: str='memory',\n expire_after: int=3600, allowable_methods: Tuple[str]=('GET',), **\n kwargs):\n self.cache_name = cache_name\n self.backend = backend\n self.expire_after = expire_after\n self.allowable_methods = allowable_methods\n self.cache_enabled = False\n if self.cache_name:\n self.enable_cache(**kwargs)\n\n @abc.abstractmethod\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n \"\"\"Enable caching for each request\"\"\"\n pass\n\n @abc.abstractmethod\n def disable_cache(self) ->None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-5": "\"\"\"\npokespeare.http.py\n~~~~~~~~~~~~~~~~~~\n\nContains definitions of custom HTTP clients, allowing for more flexibility on\nthe library choice\n\"\"\"\n\nimport abc\nimport requests\nfrom typing import Dict, Tuple, Any\nfrom .exceptions import HTTPError, UnexpectedError\nimport requests_cache\n\n\nclass HTTPClient(abc.ABC):\n \"\"\"Basic interface class. Allow to define custom HTTP clients giving\n stronger contract behaviour\n\n :type cache_name: str\n :param cache_name: The name of the cache, corresponds to the name of the\n sqlite DB on the filesystem if the `beckend` is sqlite\n or the name of the redis namespace in case of `redis`\n backend.\n\n :type backend: str\n :param backend: The backend to use, can be either `memory` to use a simple\n python dict, `sqlite` to use a sqlite DB on the filesystem\n or `redis` for a redis cache\n\n :type expire_after: int\n :param expire_after: Define after how many seconds each key in the cache\n have to be evicted\n\n :type allowable_methods: Tuple[str]\n :param allowable_methods: A tuple of strings defining for which HTTP\n methods to apply caching\n\n Also supports `connection` in case of a redis connection on kwargs,\n for more info `https://requests-cache.readthedocs.io/en/latest/api.html`\n \"\"\"\n\n def __init__(\n self,\n cache_name: str = \"\",\n *,\n backend: str = \"memory\",\n expire_after: int = 3600,\n allowable_methods: Tuple[str] = (\"GET\",),\n **kwargs\n ):\n self.cache_name = cache_name\n self.backend = backend\n self.expire_after = expire_after\n self.allowable_methods = allowable_methods\n self.cache_enabled = False\n if self.cache_name:\n self.enable_cache(**kwargs)\n\n @abc.abstractmethod\n def enable_cache(self, **kwargs: Dict[str, Any]) -> None:\n \"\"\"Enable caching for each request\"\"\"\n pass\n\n @abc.abstractmethod\n def disable_cache(self) -> None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) -> None:\n requests_cache.install_cache(\n self.cache_name,\n backend=self.backend,\n expire_after=self.expire_after,\n allowable_methods=self.allowable_methods,\n **kwargs\n )\n self.cache_enabled = True\n\n def disable_cache(self) -> None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (\n requests.exceptions.HTTPError,\n requests.exceptions.TooManyRedirects,\n ) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n try:\n response = requests.post(url, **kwargs)\n except (\n requests.exceptions.HTTPError,\n requests.exceptions.TooManyRedirects,\n ) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-ids": [
8,
10,
11,
14,
15
]
}
|
[
8,
10,
11,
14,
15
] |
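The pokespeare record above centres on transparent response caching. Below is a minimal usage sketch of the RequestsHTTPClient it defines; the import path assumes the pokespeare package layout named in the step-5 docstring, and the URL is purely illustrative, not part of the record.

# Hypothetical usage of the record's RequestsHTTPClient; module path and URL
# are assumptions for illustration only.
from pokespeare.http import RequestsHTTPClient

client = RequestsHTTPClient('poke_cache', backend='sqlite', expire_after=600)
first = client.get('https://pokeapi.co/api/v2/pokemon/ditto')   # network hit
second = client.get('https://pokeapi.co/api/v2/pokemon/ditto')  # served from the sqlite cache within 600 s
client.disable_cache()  # later calls go back to the network

Note that enable_cache() installs the cache globally through requests_cache.install_cache, so while it is enabled every requests call in the process is cached, not only calls made through this client instance.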
nome = str(input('Enter a full name: ')).lower()
silva = 'silva' in nome
if silva:
    print('There is a Silva in this name')
else:
    print('There is no Silva in this name')
|
normal
|
{
"blob_id": "faebefcadbc184fab29deb2988089223a8f09e7e",
"index": 8219,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif silva == True:\n print('Existe Silva nesse nome')\nelse:\n print('Não há Silva nesse nome')\n",
"step-3": "nome = str(input('Digite um nome completo: ')).lower()\nsilva = 'silva' in nome\nif silva == True:\n print('Existe Silva nesse nome')\nelse:\n print('Não há Silva nesse nome')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
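One subtlety in the record above: 'silva' in nome is a substring test, so it also matches names such as 'Silvana'. A token-based check, sketched below and not part of the record, avoids that false positive.

# Sketch: match 'silva' as a whole surname token instead of as a substring.
nome = str(input('Enter a full name: ')).lower()
if 'silva' in nome.split():
    print('There is a Silva in this name')
else:
    print('There is no Silva in this name')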
<|reserved_special_token_0|>
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
<|reserved_special_token_0|>
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
<|reserved_special_token_0|>
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
label.image = photo
<|reserved_special_token_0|>
def checkwin():
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
<|reserved_special_token_0|>
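The receive_data loop above repeats the same three widget updates once per button name. A table-driven variant, sketched below, collapses the chain; it assumes the same 'button'..'button9' wire format and the sock, labels, New and b1..b9 names used in the code above.

# Sketch: dispatch incoming moves through a name -> widget table.
buttons = {'button': b1, 'button2': b2, 'button3': b3,
           'button4': b4, 'button5': b5, 'button6': b6,
           'button7': b7, 'button8': b8, 'button9': b9}

def receive_data():
    while True:
        data = sock.recv(1024).decode()
        widget = buttons.get(data)
        if widget is None:
            continue  # ignore anything that is not a known button name
        labels.config(text="My Turn or O's Turn")
        widget.config(text='X')
        for w in New.winfo_children():
            w.configure(state='normal')   # re-enable the board for this player
        widget.config(state='disabled')   # except the square the opponent took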
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root.title('Tic-Tac-Toe')
root.geometry('600x600')
<|reserved_special_token_0|>
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
<|reserved_special_token_0|>
global connection_established
<|reserved_special_token_0|>
sock.connect((HOST, PORT))
<|reserved_special_token_0|>
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
label.image = photo
<|reserved_special_token_0|>
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand=YES)
root.after(5000, lambda : root.destroy())
root.mainloop()
<|reserved_special_token_0|>
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
<|reserved_special_token_0|>
def checkwin():
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
<|reserved_special_token_0|>
b1.grid(row=0, column=0)
<|reserved_special_token_0|>
b2.grid(row=0, column=1)
<|reserved_special_token_0|>
b3.grid(row=0, column=2)
<|reserved_special_token_0|>
b4.grid(row=1, column=0)
<|reserved_special_token_0|>
b5.grid(row=1, column=1)
<|reserved_special_token_0|>
b6.grid(row=1, column=2)
<|reserved_special_token_0|>
b7.grid(row=2, column=0)
<|reserved_special_token_0|>
b8.grid(row=2, column=1)
<|reserved_special_token_0|>
b9.grid(row=2, column=2)
<|reserved_special_token_0|>
labels.grid(row=3, column=0)
for w in New.winfo_children():
w.configure(state='disabled')
New.mainloop()
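checkwin above spells out every winning line for both marks by hand. The same behaviour fits in one loop over the eight winning triples; the sketch below uses only names already defined in the code above.

# Sketch: replace the sixteen elif branches with a loop over the winning lines.
def checkwin():
    global winner
    winner = False
    lines = [(b1, b2, b3), (b4, b5, b6), (b7, b8, b9),  # rows
             (b1, b4, b7), (b2, b5, b8), (b3, b6, b9),  # columns
             (b1, b5, b9), (b3, b5, b7)]                # diagonals
    for a, b, c in lines:
        for mark in ('X', 'O'):
            if a['text'] == b['text'] == c['text'] == mark:
                for cell in (a, b, c):
                    cell.config(bg='green')  # highlight the winning line
                winner = True
                for w in New.winfo_children():
                    w.configure(state='disabled')
                messagebox.showinfo('Winner', 'Congo!!!!!!!%s Wins!!!!!!!!' % mark)
                return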
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = Tk()
root.title('Tic-Tac-Toe')
root.geometry('600x600')
winner = False
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
HOST = '127.0.0.1'
PORT = 65432
global connection_established
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
connection_established = True
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
label.image = photo
image = Image.open('C:\\Users\\User\\Any_Path\\Tic-tac-toe1.png')
copy_of_image = image.copy()
photo = ImageTk.PhotoImage(image)
label = ttk.Label(root, image=photo)
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand=YES)
root.after(5000, lambda : root.destroy())
root.mainloop()
New = Tk()
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
clicked = 'Y'
def checkwin():
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
b1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b1))
b1.grid(row=0, column=0)
b2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b2))
b2.grid(row=0, column=1)
b3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b3))
b3.grid(row=0, column=2)
b4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b4))
b4.grid(row=1, column=0)
b5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b5))
b5.grid(row=1, column=1)
b6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b6))
b6.grid(row=1, column=2)
b7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b7))
b7.grid(row=2, column=0)
b8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b8))
b8.grid(row=2, column=1)
b9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b9))
b9.grid(row=2, column=2)
labels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',
height=2, justify='center')
labels.grid(row=3, column=0)
for w in New.winfo_children():
w.configure(state='disabled')
New.mainloop()
<|reserved_special_token_1|>
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from PIL import Image, ImageTk
import time
import socket
import threading
root = Tk()
root.title('Tic-Tac-Toe')
root.geometry('600x600')
winner = False
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
HOST = '127.0.0.1'
PORT = 65432
global connection_established
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
connection_established = True
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
label.image = photo
image = Image.open('C:\\Users\\User\\Any_Path\\Tic-tac-toe1.png')
copy_of_image = image.copy()
photo = ImageTk.PhotoImage(image)
label = ttk.Label(root, image=photo)
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand=YES)
root.after(5000, lambda : root.destroy())
root.mainloop()
New = Tk()
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
clicked = 'Y'
def checkwin():
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
b1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b1))
b1.grid(row=0, column=0)
b2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b2))
b2.grid(row=0, column=1)
b3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b3))
b3.grid(row=0, column=2)
b4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b4))
b4.grid(row=1, column=0)
b5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b5))
b5.grid(row=1, column=1)
b6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b6))
b6.grid(row=1, column=2)
b7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b7))
b7.grid(row=2, column=0)
b8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b8))
b8.grid(row=2, column=1)
b9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b9))
b9.grid(row=2, column=2)
labels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',
height=2, justify='center')
labels.grid(row=3, column=0)
for w in New.winfo_children():
w.configure(state='disabled')
New.mainloop()
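Every rendering above connects to 127.0.0.1:65432, but the record never shows the server side. The sketch below is a guess at a minimal relay that pairs two such clients; the real server, including how it tells one player to move first (both boards start fully disabled), is not recoverable from the record.

# Hypothetical relay server for two tic-tac-toe clients; not part of the record.
import socket
import threading

HOST, PORT = '127.0.0.1', 65432

def forward(src, dst):
    # Copy every move from one player's socket to the other's.
    while True:
        data = src.recv(1024)
        if not data:
            break
        dst.sendall(data)

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((HOST, PORT))
    srv.listen(2)
    p1, _ = srv.accept()
    p2, _ = srv.accept()
    threading.Thread(target=forward, args=(p1, p2), daemon=True).start()
    forward(p2, p1)  # second direction runs on the main thread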
<|reserved_special_token_1|>
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from PIL import Image, ImageTk
import time
import socket
import threading
root = Tk()
root.title("Tic-Tac-Toe")
root.geometry('600x600')
winner = False
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
HOST = '127.0.0.1'
PORT = 65432
global connection_established
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
connection_established = True
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is',data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b1.config(state="disabled")
elif data == 'button2' :
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b2.config(state="disabled")
elif data == 'button3' :
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b3.config(state="disabled")
elif data == 'button4' :
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b4.config(state="disabled")
elif data == 'button5' :
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b5.config(state="disabled")
elif data == 'button6' :
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b6.config(state="disabled")
elif data == 'button7' :
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b7.config(state="disabled")
elif data == 'button8' :
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b8.config(state="disabled")
elif data == 'button9' :
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b9.config(state="disabled")
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image = photo)
label.image = photo #avoid garbage collection
image = Image.open('C:\\Users\\User\\Any_Path\\Tic-tac-toe1.png')
copy_of_image = image.copy()
photo = ImageTk.PhotoImage(image)
label = ttk.Label(root, image = photo)
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand = YES)
root.after(5000, lambda: root.destroy()) # Destroy the widget after 5 seconds (5000 ms)
root.mainloop()
New = Tk()
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
clicked = 'Y'
def checkwin():
global winner
winner = False
if b1["text"] == "X" and b2["text"] == "X" and b3["text"] == "X":
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b4["text"] == "X" and b5["text"] == "X" and b6["text"] == "X":
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b7["text"] == "X" and b8["text"] == "X" and b9["text"] == "X":
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b1["text"] == "X" and b4["text"] == "X" and b7["text"] == "X":
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b2["text"] == "X" and b5["text"] == "X" and b8["text"] == "X":
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b3["text"] == "X" and b6["text"] == "X" and b9["text"] == "X":
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b1["text"] == "X" and b5["text"] == "X" and b9["text"] == "X":
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b3["text"] == "X" and b5["text"] == "X" and b7["text"] == "X":
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
###################################
elif b1["text"] == "O" and b2["text"] == "O" and b3["text"] == "O":
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b4["text"] == "O" and b5["text"] == "O" and b6["text"] == "O":
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b7["text"] == "O" and b8["text"] == "O" and b9["text"] == "O":
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b1["text"] == "O" and b4["text"] == "O" and b7["text"] == "O":
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b2["text"] == "O" and b5["text"] == "O" and b8["text"] == "O":
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b3["text"] == "O" and b6["text"] == "O" and b9["text"] == "O":
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b1["text"] == "O" and b5["text"] == "O" and b9["text"] == "O":
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo('Winner',"Congo!!!!!!!O Wins!!!!!!!!")
elif b3["text"] == "O" and b5["text"] == "O" and b7["text"] == "O":
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
def b_click(b):
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b["text"] == '' and b['state'] != 'disabled' :
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state="disabled")
b1 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b1))
b1.grid(row=0,column=0)
b2 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b2))
b2.grid(row=0,column=1)
b3 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b3))
b3.grid(row=0,column=2)
b4 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b4))
b4.grid(row=1,column=0)
b5 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b5))
b5.grid(row=1,column=1)
b6 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b6))
b6.grid(row=1,column=2)
b7 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b7))
b7.grid(row=2,column=0)
b8 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b8))
b8.grid(row=2,column=1)
b9 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b9))
b9.grid(row=2,column=2)
labels = Label(New, fg="white",bg="black", pady=1,text="Opponent Turn ",height=2,justify="center")
labels.grid(row=3,column=0)
for w in New.winfo_children():
w.configure(state="disabled")
#menu = Menu(New)
#New.config(menu=menu)
#options = Menu(menu,tearoff=False)
New.mainloop()
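One caveat about the original code above: receive_data updates tkinter widgets from a worker thread, and tkinter does not guarantee that cross-thread widget access is safe. A common workaround, sketched below rather than taken from the record, pushes messages onto a queue and lets the main loop poll it.

# Sketch: marshal socket data onto the tkinter main loop through a queue.
# handle_move is hypothetical shorthand for the widget updates done above.
import queue

inbox = queue.Queue()

def receive_data():             # worker thread: no widget access at all
    while True:
        inbox.put(sock.recv(1024).decode())

def poll_inbox():               # runs on the tkinter main loop
    try:
        while True:
            handle_move(inbox.get_nowait())
    except queue.Empty:
        pass
    New.after(100, poll_inbox)  # poll again in 100 ms

New.after(100, poll_inbox)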
|
flexible
|
{
"blob_id": "cc924892afe179e55166ea9b237b2bfe8ea900df",
"index": 2120,
"step-1": "<mask token>\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\n<mask token>\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\n<mask token>\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\n<mask token>\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for 
w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's 
Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\n<mask token>\n",
"step-2": "<mask token>\nroot.title('Tic-Tac-Toe')\nroot.geometry('600x600')\n<mask token>\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\n<mask token>\nglobal connection_established\n<mask token>\nsock.connect((HOST, PORT))\n<mask token>\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\nstart_thread(receive_data)\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\n<mask token>\nlabel.bind('<Configure>', resize_image)\nlabel.pack(fill=BOTH, expand=YES)\nroot.after(5000, lambda : root.destroy())\nroot.mainloop()\n<mask token>\nNew.title('Tic-Tac-Toe')\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\n<mask token>\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and 
b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n 
b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\n<mask token>\nb1.grid(row=0, column=0)\n<mask token>\nb2.grid(row=0, column=1)\n<mask token>\nb3.grid(row=0, column=2)\n<mask token>\nb4.grid(row=1, column=0)\n<mask token>\nb5.grid(row=1, column=1)\n<mask token>\nb6.grid(row=1, column=2)\n<mask token>\nb7.grid(row=2, column=0)\n<mask token>\nb8.grid(row=2, column=1)\n<mask token>\nb9.grid(row=2, column=2)\n<mask token>\nlabels.grid(row=3, column=0)\nfor w in New.winfo_children():\n w.configure(state='disabled')\nNew.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nroot.title('Tic-Tac-Toe')\nroot.geometry('600x600')\nwinner = False\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\nHOST = '127.0.0.1'\nPORT = 65432\nglobal connection_established\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((HOST, PORT))\nconnection_established = True\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\nstart_thread(receive_data)\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\nimage = Image.open('C:\\\\Users\\\\User\\\\Any_Path\\\\Tic-tac-toe1.png')\ncopy_of_image = image.copy()\nphoto = ImageTk.PhotoImage(image)\nlabel = ttk.Label(root, image=photo)\nlabel.bind('<Configure>', resize_image)\nlabel.pack(fill=BOTH, expand=YES)\nroot.after(5000, lambda : root.destroy())\nroot.mainloop()\nNew = Tk()\nNew.title('Tic-Tac-Toe')\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\nclicked = 'Y'\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and 
b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n 
b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\nb1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b1))\nb1.grid(row=0, column=0)\nb2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b2))\nb2.grid(row=0, column=1)\nb3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b3))\nb3.grid(row=0, column=2)\nb4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b4))\nb4.grid(row=1, column=0)\nb5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b5))\nb5.grid(row=1, column=1)\nb6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b6))\nb6.grid(row=1, column=2)\nb7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b7))\nb7.grid(row=2, column=0)\nb8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b8))\nb8.grid(row=2, column=1)\nb9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b9))\nb9.grid(row=2, column=2)\nlabels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',\n height=2, justify='center')\nlabels.grid(row=3, column=0)\nfor w in New.winfo_children():\n w.configure(state='disabled')\nNew.mainloop()\n",
"step-4": "from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nimport time\nimport socket\nimport threading\nroot = Tk()\nroot.title('Tic-Tac-Toe')\nroot.geometry('600x600')\nwinner = False\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\nHOST = '127.0.0.1'\nPORT = 65432\nglobal connection_established\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((HOST, PORT))\nconnection_established = True\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\nstart_thread(receive_data)\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\nimage = Image.open('C:\\\\Users\\\\User\\\\Any_Path\\\\Tic-tac-toe1.png')\ncopy_of_image = image.copy()\nphoto = ImageTk.PhotoImage(image)\nlabel = ttk.Label(root, image=photo)\nlabel.bind('<Configure>', resize_image)\nlabel.pack(fill=BOTH, expand=YES)\nroot.after(5000, lambda : root.destroy())\nroot.mainloop()\nNew = Tk()\nNew.title('Tic-Tac-Toe')\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\nclicked = 'Y'\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n 
w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 
'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\nb1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b1))\nb1.grid(row=0, column=0)\nb2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b2))\nb2.grid(row=0, column=1)\nb3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b3))\nb3.grid(row=0, column=2)\nb4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b4))\nb4.grid(row=1, column=0)\nb5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b5))\nb5.grid(row=1, column=1)\nb6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b6))\nb6.grid(row=1, column=2)\nb7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b7))\nb7.grid(row=2, column=0)\nb8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b8))\nb8.grid(row=2, column=1)\nb9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b9))\nb9.grid(row=2, column=2)\nlabels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',\n height=2, justify='center')\nlabels.grid(row=3, column=0)\nfor w in New.winfo_children():\n w.configure(state='disabled')\nNew.mainloop()\n",
"step-5": "from tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\nfrom PIL import Image, ImageTk\r\nimport time\r\nimport socket\r\nimport threading\r\nroot = Tk()\r\nroot.title(\"Tic-Tac-Toe\")\r\nroot.geometry('600x600')\r\n\r\n\r\nwinner = False\r\n\r\ndef start_thread(target):\r\n thread = threading.Thread(target=target)\r\n thread.daemon = True\r\n thread.start()\r\n \r\nHOST = '127.0.0.1' \r\nPORT = 65432\r\nglobal connection_established\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsock.connect((HOST, PORT))\r\n\r\nconnection_established = True\r\n\r\ndef receive_data():\r\n \r\n while True:\r\n data = sock.recv(1024).decode()\r\n print('decoded is',data)\r\n \r\n if data == 'button': \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b1.config(text='X') \r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b1.config(state=\"disabled\")\r\n \r\n \r\n \r\n elif data == 'button2' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b2.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b2.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button3' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b3.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b3.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button4' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b4.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b4.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button5' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b5.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b5.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button6' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b6.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b6.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button7' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b7.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b7.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button8' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b8.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b8.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button9' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b9.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b9.config(state=\"disabled\")\r\n \r\n \r\n \r\nstart_thread(receive_data)\r\n\r\ndef resize_image(event):\r\n new_width = event.width\r\n new_height = event.height\r\n image = copy_of_image.resize((new_width, new_height))\r\n photo = ImageTk.PhotoImage(image)\r\n label.config(image = photo)\r\n label.image = photo #avoid garbage collection\r\n\r\nimage = Image.open('C:\\\\Users\\\\User\\\\Any_Path\\\\Tic-tac-toe1.png')\r\ncopy_of_image = image.copy()\r\nphoto = ImageTk.PhotoImage(image)\r\nlabel = ttk.Label(root, image = photo)\r\nlabel.bind('<Configure>', resize_image)\r\nlabel.pack(fill=BOTH, expand = YES)\r\n\r\nroot.after(5000, lambda: root.destroy()) # Destroy the widget after 30 seconds\r\nroot.mainloop()\r\n\r\nNew = Tk()\r\nNew.title('Tic-Tac-Toe')\r\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\r\n\r\n\r\n\r\nclicked = 'Y'\r\ndef checkwin():\r\n global winner\r\n winner = False\r\n if 
b1[\"text\"] == \"X\" and b2[\"text\"] == \"X\" and b3[\"text\"] == \"X\":\r\n b1.config(bg='green')\r\n b2.config(bg='green')\r\n b3.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b4[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b6[\"text\"] == \"X\":\r\n b4.config(bg='green')\r\n b5.config(bg='green')\r\n b6.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\") \r\n \r\n elif b7[\"text\"] == \"X\" and b8[\"text\"] == \"X\" and b9[\"text\"] == \"X\":\r\n b7.config(bg='green')\r\n b8.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"X\" and b4[\"text\"] == \"X\" and b7[\"text\"] == \"X\":\r\n b1.config(bg='green')\r\n b4.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b2[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b8[\"text\"] == \"X\":\r\n b2.config(bg='green')\r\n b5.config(bg='green')\r\n b8.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\") \r\n \r\n elif b3[\"text\"] == \"X\" and b6[\"text\"] == \"X\" and b9[\"text\"] == \"X\":\r\n b3.config(bg='green')\r\n b6.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b9[\"text\"] == \"X\":\r\n b1.config(bg='green')\r\n b5.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b3[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b7[\"text\"] == \"X\":\r\n b3.config(bg='green')\r\n b5.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\") \r\n \r\n ###################################\r\n \r\n \r\n elif b1[\"text\"] == \"O\" and b2[\"text\"] == \"O\" and b3[\"text\"] == \"O\":\r\n b1.config(bg='green')\r\n b2.config(bg='green')\r\n b3.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b4[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b6[\"text\"] == \"O\":\r\n b4.config(bg='green')\r\n b5.config(bg='green')\r\n b6.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\") \r\n \r\n elif b7[\"text\"] == \"O\" and b8[\"text\"] == \"O\" and b9[\"text\"] == \"O\":\r\n b7.config(bg='green')\r\n b8.config(bg='green')\r\n b9.config(bg='green')\r\n 
winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"O\" and b4[\"text\"] == \"O\" and b7[\"text\"] == \"O\":\r\n b1.config(bg='green')\r\n b4.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b2[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b8[\"text\"] == \"O\":\r\n b2.config(bg='green')\r\n b5.config(bg='green')\r\n b8.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\") \r\n \r\n elif b3[\"text\"] == \"O\" and b6[\"text\"] == \"O\" and b9[\"text\"] == \"O\":\r\n b3.config(bg='green')\r\n b6.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b9[\"text\"] == \"O\":\r\n b1.config(bg='green')\r\n b5.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo('Winner',\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n\r\n elif b3[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b7[\"text\"] == \"O\":\r\n b3.config(bg='green')\r\n b5.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n\r\n \r\n \r\n \r\ndef b_click(b):\r\n \r\n to_send = str(b)\r\n \r\n to_send = to_send.replace('.', '')\r\n to_send = str(to_send.replace('!', ''))\r\n print(to_send)\r\n global clicked\r\n if b[\"text\"] == '' and b['state'] != 'disabled' :\r\n labels.config(text=\"X's Turn\")\r\n b.configure(state=DISABLED)\r\n b['text'] = 'O' \r\n checkwin() \r\n if connection_established == True:\r\n sock.send(to_send.encode())\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nb1 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b1))\r\nb1.grid(row=0,column=0)\r\nb2 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b2))\r\nb2.grid(row=0,column=1)\r\nb3 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b3))\r\nb3.grid(row=0,column=2)\r\n\r\nb4 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b4))\r\nb4.grid(row=1,column=0)\r\nb5 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b5))\r\nb5.grid(row=1,column=1)\r\nb6 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b6))\r\nb6.grid(row=1,column=2)\r\n\r\nb7 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b7))\r\nb7.grid(row=2,column=0)\r\nb8 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b8))\r\nb8.grid(row=2,column=1)\r\nb9 = Button(New, 
text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b9))\r\nb9.grid(row=2,column=2)\r\n\r\nlabels = Label(New, fg=\"white\",bg=\"black\", pady=1,text=\"Opponent Turn \",height=2,justify=\"center\")\r\nlabels.grid(row=3,column=0)\r\nfor w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n\r\n#menu = Menu(New)\r\n#New.config(menu=menu)\r\n#options = Menu(menu,tearoff=False)\r\n\r\n\r\nNew.mainloop()\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
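The receive_data handler in the Tic-Tac-Toe record above repeats the same four statements once per button. The sketch below is a hypothetical table-driven refactor, assuming the widgets (b1 through b9, labels, New) and the socket sock defined in that example; it is not part of the original record.

def receive_data():
    # Map each wire message to the button it names (names as in the example).
    buttons = {'button': b1, 'button2': b2, 'button3': b3,
               'button4': b4, 'button5': b5, 'button6': b6,
               'button7': b7, 'button8': b8, 'button9': b9}
    while True:
        data = sock.recv(1024).decode()
        button = buttons.get(data)
        if button is None:
            continue  # ignore messages that do not name a button
        labels.config(text="My Turn or O's Turn")
        button.config(text='X')
        for w in New.winfo_children():
            w.configure(state='normal')
        button.config(state='disabled')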
<|reserved_special_token_0|>
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
    (lat_decimal, lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(__doc__)
<|reserved_special_token_0|>
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
    (lat_decimal, lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
<|reserved_special_token_0|>
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"', ''))
gps_matrix[1][key] = float(row[1].replace('"', ''))
except:
a = float(row[1].replace(',', ''))
print('problem string to float')
<|reserved_special_token_0|>
print('Estimated number of clusters: %d' % n_clusters_)
print('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))
print('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))
print('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(
labels_true, labels))
print('Adjusted Mutual Information: %0.3f' % metrics.
adjusted_mutual_info_score(labels_true, labels))
print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
<|reserved_special_token_0|>
for k, col in zip(unique_labels, colors):
if k == -1:
col = 'k'
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(__doc__)
<|reserved_special_token_0|>
EARTHRADIUS = 6371.0
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
    (lat_decimal, lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
<|reserved_special_token_0|>
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"', ''))
gps_matrix[1][key] = float(row[1].replace('"', ''))
except:
a = float(row[1].replace(',', ''))
print('problem string to float')
D = spatial.distance.pdist(np.array(gps_matrix).T, lambda u, v:
    getDistanceByHaversine(u, v))
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))
print('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))
print('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(
labels_true, labels))
print('Adjusted Mutual Information: %0.3f' % metrics.
adjusted_mutual_info_score(labels_true, labels))
print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
<|reserved_special_token_0|>
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
col = 'k'
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import scipy as sp
import numpy as np
from scipy import spatial
print(__doc__)
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
EARTHRADIUS = 6371.0
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
    (lat_decimal, lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
import csv
import re
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"', ''))
gps_matrix[1][key] = float(row[1].replace('"', ''))
except:
a = float(row[1].replace(',', ''))
print('problem string to float')
D = spatial.distance.pdist(np.array(gps_matrix).T, lambda u, v:
    getDistanceByHaversine(u, v))
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))
print('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))
print('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(
labels_true, labels))
print('Adjusted Mutual Information: %0.3f' % metrics.
adjusted_mutual_info_score(labels_true, labels))
print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
import matplotlib.pyplot as plt
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
col = 'k'
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
import scipy as sp
import numpy as np
from scipy import spatial
print(__doc__)
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Calculate Distance: Haversine method
EARTHRADIUS = 6371.0
def getDistanceByHaversine(loc1, loc2):
'''Haversine formula - give coordinates as a 2D numpy array of
    (lat_decimal, lon_decimal) pairs'''
#
# "unpack" our numpy array, this extracts column wise arrays
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
#
# convert to radians ##### Completely identical
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
#
# haversine formula #### Same, but atan2 named arctan2 in numpy
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (np.sin(dlat/2))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2.0))**2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0-a))
km = EARTHRADIUS * c
return km
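# Quick sanity check for the function above (hypothetical coordinates, given in
# the (lon, lat) order the function unpacks):
#   getDistanceByHaversine((2.17, 41.39), (-3.70, 40.42))
# returns roughly 505 km, the great-circle distance from Barcelona to Madrid.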
##############################################################################
# Create a Matrix with longitude and latitude
import csv
import re
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[0 for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"',''))
gps_matrix[1][key] = float(row[1].replace('"',''))
except:
a = float(row[1].replace(',',''))
print('problem string to float')
##############################################################################
# Calculate the Distance matrix (transpose: pdist needs one (lon, lat) point per row)
D = spatial.distance.pdist(np.array(gps_matrix).T, lambda u, v: getDistanceByHaversine(u, v))
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
flexible
|
{
"blob_id": "d2e3ac490ce5fdc20976567fa320a9e6a53cbe34",
"index": 1037,
"step-1": "<mask token>\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(__doc__)\n<mask token>\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\n<mask token>\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n row_count = sum(1 for row in data)\n gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"', ''))\n gps_matrix[1][key] = float(row[1].replace('\"', ''))\n except:\n a = float(row[1].replace(',', ''))\n print('problem string to float')\n<mask token>\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))\nprint('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))\nprint('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))\nprint('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(\n labels_true, labels))\nprint('Adjusted Mutual Information: %0.3f' % metrics.\n adjusted_mutual_info_score(labels_true, labels))\nprint('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))\n<mask token>\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n col = 'k'\n class_member_mask = labels == k\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=14)\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=6)\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"step-3": "<mask token>\nprint(__doc__)\n<mask token>\nEARTHRADIUS = 6371.0\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\n<mask token>\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n row_count = sum(1 for row in data)\n gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"', ''))\n gps_matrix[1][key] = float(row[1].replace('\"', ''))\n except:\n a = float(row[1].replace(',', ''))\n print('problem string to float')\nD = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(\n u, v))\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n random_state=0)\nX = StandardScaler().fit_transform(X)\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))\nprint('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))\nprint('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))\nprint('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(\n labels_true, labels))\nprint('Adjusted Mutual Information: %0.3f' % metrics.\n adjusted_mutual_info_score(labels_true, labels))\nprint('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))\n<mask token>\nunique_labels = set(labels)\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n col = 'k'\n class_member_mask = labels == k\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=14)\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=6)\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"step-4": "<mask token>\nimport scipy as sp\nimport numpy as np\nfrom scipy import spatial\nprint(__doc__)\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nEARTHRADIUS = 6371.0\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\nimport csv\nimport re\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n row_count = sum(1 for row in data)\n gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"', ''))\n gps_matrix[1][key] = float(row[1].replace('\"', ''))\n except:\n a = float(row[1].replace(',', ''))\n print('problem string to float')\nD = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(\n u, v))\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n random_state=0)\nX = StandardScaler().fit_transform(X)\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))\nprint('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))\nprint('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))\nprint('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(\n labels_true, labels))\nprint('Adjusted Mutual Information: %0.3f' % metrics.\n adjusted_mutual_info_score(labels_true, labels))\nprint('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))\nimport matplotlib.pyplot as plt\nunique_labels = set(labels)\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n col = 'k'\n class_member_mask = labels == k\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=14)\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=6)\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n===================================\nDemo of DBSCAN clustering algorithm\n===================================\n\nFinds core samples of high density and expands clusters from them.\n\n\"\"\"\nimport scipy as sp\nimport numpy as np\n\nfrom scipy import spatial\nprint(__doc__)\n\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\n\n##############################################################################\n# Calcule Distance Haversine Methods\n\nEARTHRADIUS = 6371.0\n\ndef getDistanceByHaversine(loc1, loc2):\n '''Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs'''\n #\n # \"unpack\" our numpy array, this extracts column wise arrays\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n #\n # convert to radians ##### Completely identical\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n #\n # haversine formula #### Same, but atan2 named arctan2 in numpy\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = (np.sin(dlat/2))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2.0))**2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0-a))\n km = EARTHRADIUS * c\n return km\n\n\n##############################################################################\n# Create a Matrix with longitude and latitude\n\nimport csv\nimport re\n\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n\n row_count = sum(1 for row in data)\n gps_matrix = [[0 for i in range(row_count)] for j in range(2)]\n\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"',''))\n gps_matrix[1][key] = float(row[1].replace('\"',''))\n except:\n a = float(row[1].replace(',',''))\n print('problem string to float')\n\n##############################################################################\n# Calculate the Distance matrix\n\nD = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(u,v))\n\n\n##############################################################################\n# Generate sample data\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n random_state=0)\n\nX = StandardScaler().fit_transform(X)\n\n##############################################################################\n# Compute DBSCAN\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\n\n# Number of clusters in labels, ignoring noise if present.\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels_true, labels))\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels_true, labels))\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels_true, labels))\nprint(\"Adjusted Rand Index: %0.3f\"\n % metrics.adjusted_rand_score(labels_true, labels))\nprint(\"Adjusted Mutual Information: %0.3f\"\n % metrics.adjusted_mutual_info_score(labels_true, labels))\nprint(\"Silhouette Coefficient: %0.3f\"\n % 
metrics.silhouette_score(X, labels))\n\n##############################################################################\n# Plot result\nimport matplotlib.pyplot as plt\n\n# Black removed and is used for noise instead.\nunique_labels = set(labels)\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = 'k'\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=14)\n\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=6)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
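Note on the DBSCAN row above: it builds a haversine distance matrix D from the CSV coordinates but then clusters synthetic blobs, so D is never used. A minimal sketch, assuming the record's gps_matrix and getDistanceByHaversine are in scope, of feeding those distances to DBSCAN via scikit-learn's precomputed metric (scipy's pdist treats rows as observations, hence the transpose):

import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.cluster import DBSCAN

# One (lon, lat) observation per row, matching what the haversine metric expects.
coords = np.asarray(gps_matrix).T
# Condensed kilometre distances, expanded to the square matrix DBSCAN needs.
dist_km = squareform(pdist(coords, getDistanceByHaversine))
# eps is in kilometres here because the distances are precomputed.
labels = DBSCAN(eps=5.0, min_samples=10, metric='precomputed').fit_predict(dist_km)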
<|reserved_special_token_0|>
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Praise(TimeStampedModel):
class Meta:
verbose_name = '칭찬'
verbose_name_plural = verbose_name
<|reserved_special_token_0|>
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Praise(TimeStampedModel):
class Meta:
verbose_name = '칭찬'
verbose_name_plural = verbose_name
content = models.CharField(verbose_name='내용', unique=True, max_length=200)
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
<|reserved_special_token_1|>
from django.contrib.postgres.fields import JSONField
from django.db import models
from service.models import TimeStampedModel
class Praise(TimeStampedModel):
class Meta:
verbose_name = '칭찬'
verbose_name_plural = verbose_name
content = models.CharField(verbose_name='내용', unique=True, max_length=200)
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
|
flexible
|
{
"blob_id": "a4db12fee72989f983c1069839dc0a5ede4561a3",
"index": 686,
"step-1": "<mask token>\n\n\nclass PraiseHistory(TimeStampedModel):\n\n\n class Meta:\n verbose_name = '칭찬 내역'\n verbose_name_plural = verbose_name\n praise = models.ForeignKey(Praise, verbose_name='칭찬')\n choices = JSONField(verbose_name='칭찬 대상 목록')\n sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200\n )\n receiver_key = models.CharField(verbose_name='받은 사람 user key',\n max_length=200)\n",
"step-2": "<mask token>\n\n\nclass Praise(TimeStampedModel):\n\n\n class Meta:\n verbose_name = '칭찬'\n verbose_name_plural = verbose_name\n <mask token>\n\n\nclass PraiseHistory(TimeStampedModel):\n\n\n class Meta:\n verbose_name = '칭찬 내역'\n verbose_name_plural = verbose_name\n praise = models.ForeignKey(Praise, verbose_name='칭찬')\n choices = JSONField(verbose_name='칭찬 대상 목록')\n sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200\n )\n receiver_key = models.CharField(verbose_name='받은 사람 user key',\n max_length=200)\n",
"step-3": "<mask token>\n\n\nclass Praise(TimeStampedModel):\n\n\n class Meta:\n verbose_name = '칭찬'\n verbose_name_plural = verbose_name\n content = models.CharField(verbose_name='내용', unique=True, max_length=200)\n\n\nclass PraiseHistory(TimeStampedModel):\n\n\n class Meta:\n verbose_name = '칭찬 내역'\n verbose_name_plural = verbose_name\n praise = models.ForeignKey(Praise, verbose_name='칭찬')\n choices = JSONField(verbose_name='칭찬 대상 목록')\n sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200\n )\n receiver_key = models.CharField(verbose_name='받은 사람 user key',\n max_length=200)\n",
"step-4": "from django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom service.models import TimeStampedModel\n\n\nclass Praise(TimeStampedModel):\n\n\n class Meta:\n verbose_name = '칭찬'\n verbose_name_plural = verbose_name\n content = models.CharField(verbose_name='내용', unique=True, max_length=200)\n\n\nclass PraiseHistory(TimeStampedModel):\n\n\n class Meta:\n verbose_name = '칭찬 내역'\n verbose_name_plural = verbose_name\n praise = models.ForeignKey(Praise, verbose_name='칭찬')\n choices = JSONField(verbose_name='칭찬 대상 목록')\n sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200\n )\n receiver_key = models.CharField(verbose_name='받은 사람 user key',\n max_length=200)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
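One caveat on the Django models row above: models.ForeignKey without an explicit on_delete only works on Django versions before 2.0. A hedged sketch of the same field for newer Django (CASCADE is an assumed choice, not taken from the row):

class PraiseHistory(TimeStampedModel):
    # ... other fields as above ...
    # Django 2.0+ requires on_delete; CASCADE is an assumption here.
    praise = models.ForeignKey(Praise, verbose_name='칭찬',
                               on_delete=models.CASCADE)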
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extract_gpx_data(gpx_file_path, attribute='elevation'):
"""Reads in a GPX file and returns a list of values
for a specified GPX attribute.
Parameters
----------
gpx_file_path : str
File path to the GPX file (.gpx extension).
attribute: str
Name of the attribute to extract. Default
value is 'elevation'. Must match one of the
entries in the function-defined list.
Returns
-------
data : list
List containing float values of the extracted
GPX attributes.
"""
with open(gpx_file_path) as gpx_file:
gpx = gpxpy.parse(gpx_file)
primary_attributes = ['latitude', 'longitude', 'elevation', 'time']
secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',
'speed', 'verticalSpeed']
if attribute in primary_attributes:
data = [{'latitude': point.latitude, 'longitude': point.longitude,
'elevation': point.elevation, 'time': point.time}.get(attribute
) for track in gpx.tracks for segment in track.segments for
point in segment.points]
print(f'Extracted {attribute} data.')
elif attribute in secondary_attributes:
pattern = re.compile(f'^.*{attribute}.*$')
data = [float(extension.text) for track in gpx.tracks for segment in
track.segments for point in segment.points for extension in
point.extensions if pattern.match(extension.tag)]
print(f'Extracted {attribute} data.')
else:
data = []
print(
'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'
)
return data
<|reserved_special_token_1|>
import re
import gpxpy
def extract_gpx_data(gpx_file_path, attribute='elevation'):
"""Reads in a GPX file and returns a list of values
for a specified GPX attribute.
Parameters
----------
gpx_file_path : str
File path to the GPX file (.gpx extension).
attribute: str
Name of the attribute to extract. Default
value is 'elevation'. Must match one of the
entries in the function-defined list.
Returns
-------
data : list
List containing float values of the extracted
GPX attributes.
"""
with open(gpx_file_path) as gpx_file:
gpx = gpxpy.parse(gpx_file)
primary_attributes = ['latitude', 'longitude', 'elevation', 'time']
secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',
'speed', 'verticalSpeed']
if attribute in primary_attributes:
data = [{'latitude': point.latitude, 'longitude': point.longitude,
'elevation': point.elevation, 'time': point.time}.get(attribute
) for track in gpx.tracks for segment in track.segments for
point in segment.points]
print(f'Extracted {attribute} data.')
elif attribute in secondary_attributes:
pattern = re.compile(f'^.*{attribute}.*$')
data = [float(extension.text) for track in gpx.tracks for segment in
track.segments for point in segment.points for extension in
point.extensions if pattern.match(extension.tag)]
print(f'Extracted {attribute} data.')
else:
data = []
print(
'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'
)
return data
<|reserved_special_token_1|>
import re
import gpxpy
def extract_gpx_data(gpx_file_path, attribute='elevation'):
"""Reads in a GPX file and returns a list of values
for a specified GPX attribute.
Parameters
----------
gpx_file_path : str
File path to the GPX file (.gpx extension).
attribute: str
Name of the attribute to extract. Default
value is 'elevation'. Must match one of the
entries in the function-defined list.
Returns
-------
data : list
List containing float values of the extracted
GPX attributes.
"""
# Open GPX file in context manager and parse with gpxpy
with open(gpx_file_path) as gpx_file:
gpx = gpxpy.parse(gpx_file)
# Define GPX main attributes
primary_attributes = [
"latitude",
"longitude",
"elevation",
"time"
]
# Define GPX extension attributes
secondary_attributes = [
"cadence", "distance", "altitude",
"energy", "speed", "verticalSpeed"
]
# Check if specified attribute is in main
# GPX attributes (lat/lon/elevation/time)
if attribute in primary_attributes:
# Create list of values for attribute
data = [{
"latitude": point.latitude,
"longitude": point.longitude,
"elevation": point.elevation,
"time": point.time
}.get(attribute)
for track in gpx.tracks
for segment in track.segments
for point in segment.points
]
print(f"Extracted {attribute} data.")
# Check if specified attribute is in
# GPX extensions (cadence/distance/altitude
# /energy/speed/verticalSpeed)
elif attribute in secondary_attributes:
# Define pattern for attribute to match on
pattern = re.compile(f"^.*{attribute}.*$")
# Create list of values for attribute
data = [
float(extension.text)
for track in gpx.tracks
for segment in track.segments
for point in segment.points
for extension in point.extensions
if pattern.match(extension.tag)
]
print(f"Extracted {attribute} data.")
else:
data = []
print("Invalid attribute. Must be one of the following: "
"latitude, longitude, elevation, time, cadence "
"distance, altitude, energy, speed, verticalSpeed.")
# List of attribute values
return data
|
flexible
|
{
"blob_id": "cc6d18785eff0406ff7f38f18f15476375e31b76",
"index": 9254,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n primary_attributes = ['latitude', 'longitude', 'elevation', 'time']\n secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',\n 'speed', 'verticalSpeed']\n if attribute in primary_attributes:\n data = [{'latitude': point.latitude, 'longitude': point.longitude,\n 'elevation': point.elevation, 'time': point.time}.get(attribute\n ) for track in gpx.tracks for segment in track.segments for\n point in segment.points]\n print(f'Extracted {attribute} data.')\n elif attribute in secondary_attributes:\n pattern = re.compile(f'^.*{attribute}.*$')\n data = [float(extension.text) for track in gpx.tracks for segment in\n track.segments for point in segment.points for extension in\n point.extensions if pattern.match(extension.tag)]\n print(f'Extracted {attribute} data.')\n else:\n data = []\n print(\n 'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'\n )\n return data\n",
"step-3": "import re\nimport gpxpy\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n primary_attributes = ['latitude', 'longitude', 'elevation', 'time']\n secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',\n 'speed', 'verticalSpeed']\n if attribute in primary_attributes:\n data = [{'latitude': point.latitude, 'longitude': point.longitude,\n 'elevation': point.elevation, 'time': point.time}.get(attribute\n ) for track in gpx.tracks for segment in track.segments for\n point in segment.points]\n print(f'Extracted {attribute} data.')\n elif attribute in secondary_attributes:\n pattern = re.compile(f'^.*{attribute}.*$')\n data = [float(extension.text) for track in gpx.tracks for segment in\n track.segments for point in segment.points for extension in\n point.extensions if pattern.match(extension.tag)]\n print(f'Extracted {attribute} data.')\n else:\n data = []\n print(\n 'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'\n )\n return data\n",
"step-4": "import re\nimport gpxpy\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n # Open GPX file in context manager and parse with gpxpy\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n\n # Define GPX main attributes\n primary_attributes = [\n \"latitude\",\n \"longitude\",\n \"elevation\",\n \"time\"\n ]\n\n # Define GPX extension attributes\n secondary_attributes = [\n \"cadence\", \"distance\", \"altitude\",\n \"energy\", \"speed\", \"verticalSpeed\"\n ]\n\n # Check if specified attribute is in main\n # GPX attributes (lat/lon/elevation/time)\n if attribute in primary_attributes:\n\n # Create list of values for attribute\n data = [{\n \"latitude\": point.latitude,\n \"longitude\": point.longitude,\n \"elevation\": point.elevation,\n \"time\": point.time\n }.get(attribute)\n for track in gpx.tracks\n for segment in track.segments\n for point in segment.points\n ]\n\n print(f\"Extracted {attribute} data.\")\n\n # Check if specified attribute is in\n # GPX extensions (cadence/distance/altitude\n # /energy/speed/verticalSpeed)\n elif attribute in secondary_attributes:\n\n # Define pattern for attribute to match on\n pattern = re.compile(f\"^.*{attribute}.*$\")\n\n # Create list of values for attribute\n data = [\n float(extension.text)\n for track in gpx.tracks\n for segment in track.segments\n for point in segment.points\n for extension in point.extensions\n if pattern.match(extension.tag)\n ]\n\n print(f\"Extracted {attribute} data.\")\n\n else:\n data = []\n print(\"Invalid attribute. Must be one of the following: \"\n \"latitude, longitude, elevation, time, cadence \"\n \"distance, altitude, energy, speed, verticalSpeed.\")\n\n # List of attribute values\n return data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
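For reference, a short usage sketch of the extract_gpx_data function defined in the row above; 'ride.gpx' is a placeholder path, not part of the dataset:

# Hypothetical usage of extract_gpx_data.
elevations = extract_gpx_data('ride.gpx')                  # primary attribute (default)
speeds = extract_gpx_data('ride.gpx', attribute='speed')   # extension attribute
if elevations and speeds:
    print(f'{len(elevations)} elevation points, mean speed {sum(speeds) / len(speeds):.2f}')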
from django import forms
from django.conf import settings
class SurveyFeedback(forms.Form):
CHOICES = [('Very Satisfied', 'Very Satisfied'), ('Satisfied',
'Satisfied'), ('Neither', 'Neither'), ('Dissatisfied',
'Dissatisfied'), ('Very Dissatisfied', 'Very Dissatisfied')]
radioFeedback = forms.ChoiceField(label=
'How satisfied were you with the service you just received?',
choices=CHOICES, widget=forms.RadioSelect(), required=False)
textFeedback = forms.CharField(label='Survey Feedback', max_length=
settings.MAX_CHARS, required=False, widget=forms.Textarea(attrs={
'rows': 10, 'cols': 80, 'onKeyDown':
'return setTimeout(remainingChars(' + str(settings.MAX_CHARS) +
'), 100);'}))
|
normal
|
{
"blob_id": "a9b7abaaaa811cf12a15def1f2dd21f95bac3d62",
"index": 6310,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SurveyFeedback(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SurveyFeedback(forms.Form):\n CHOICES = [('Very Satisfied', 'Very Satisfied'), ('Satisfied',\n 'Satisfied'), ('Neither', 'Neither'), ('Dissatisfied',\n 'Dissatisfied'), ('Very Dissatisfied', 'Very Dissatisfied')]\n radioFeedback = forms.ChoiceField(label=\n 'How satisfied were you with the service you just received?',\n choices=CHOICES, widget=forms.RadioSelect(), required=False)\n textFeedback = forms.CharField(label='Survey Feedback', max_length=\n settings.MAX_CHARS, required=False, widget=forms.Textarea(attrs={\n 'rows': 10, 'cols': 80, 'onKeyDown': \n 'return setTimeout(remainingChars(' + str(settings.MAX_CHARS) +\n '), 100);'}))\n",
"step-4": "from django import forms\nfrom django.conf import settings\n\n\nclass SurveyFeedback(forms.Form):\n CHOICES = [('Very Satisfied', 'Very Satisfied'), ('Satisfied',\n 'Satisfied'), ('Neither', 'Neither'), ('Dissatisfied',\n 'Dissatisfied'), ('Very Dissatisfied', 'Very Dissatisfied')]\n radioFeedback = forms.ChoiceField(label=\n 'How satisfied were you with the service you just received?',\n choices=CHOICES, widget=forms.RadioSelect(), required=False)\n textFeedback = forms.CharField(label='Survey Feedback', max_length=\n settings.MAX_CHARS, required=False, widget=forms.Textarea(attrs={\n 'rows': 10, 'cols': 80, 'onKeyDown': \n 'return setTimeout(remainingChars(' + str(settings.MAX_CHARS) +\n '), 100);'}))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
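A sketch of how the SurveyFeedback form above might be driven from a view; the view name, template path, and persistence step are illustrative assumptions:

from django.shortcuts import render

def survey_view(request):
    # Bind POST data when present; both fields are optional (required=False).
    form = SurveyFeedback(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        rating = form.cleaned_data['radioFeedback']
        comments = form.cleaned_data['textFeedback']
        # Persist or forward rating/comments here (application-specific).
    return render(request, 'survey.html', {'form': form})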
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def play():
print('playing tank games...')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def play():
print('playing tank games...')
print('running tank now!!!')
<|reserved_special_token_1|>
def play():
print("playing tank games...")
print("runing tank now!!!")
|
flexible
|
{
"blob_id": "8c7fe90972feec19e280d3bccd39391af666608a",
"index": 9410,
"step-1": "<mask token>\n",
"step-2": "def play():\n print('playing tank games...')\n\n\n<mask token>\n",
"step-3": "def play():\n print('playing tank games...')\n\n\nprint('runing tank now!!!')\n",
"step-4": "def play():\n print(\"playing tank games...\")\nprint(\"runing tank now!!!\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from pyspark.sql import SparkSession, Row, functions, Column
from pyspark.sql.types import *
from pyspark.ml import Pipeline, Estimator
from pyspark.ml.feature import SQLTransformer, VectorAssembler
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder
from pyspark.ml.regression import (LinearRegression,
GBTRegressor,
RandomForestRegressor,
DecisionTreeRegressor)
import sys
from weather_tools_mv import *
schema = StructType([
StructField('station', StringType(), False),
StructField('date', DateType(), False),
# StructField('dayofyear', IntegerType(), False),
StructField('latitude', FloatType(), False),
StructField('longitude', FloatType(), False),
StructField('elevation', FloatType(), False),
StructField('tmax', FloatType(), False),
])
def get_data(inputloc, tablename='data'):
data = spark.read.csv(inputloc, schema=schema)
data.createOrReplaceTempView(tablename)
return data
input_loc = 'tmax-2'
data = get_data(input_loc)
#Part 2a
# years = list(map(lambda x: str(x), range(2000, 2018)))
years = ['2000', '2001', '2002', '2003']
reduced_data = dict()
def resolved_max(df):
df_max = df.groupBy('station').agg({'date': 'max'}).select(functions.col('station'),
functions.col('max(date)').alias('d_max'))
d_max = df.join(df_max, 'station').where(functions.col('d_max') == functions.col('date'))
fin_ret = d_max.select(functions.col('latitude'),
functions.col('longitude'),
functions.col('tmax'),
functions.col('station'))
return list(map(lambda row: row.asDict(), fin_ret.collect()))
for i in range(0, len(years) - 1):
lower = years[i]
upper = years[i+1]
zone = data.filter(functions.col('date') < upper).filter(functions.col('date') >= lower)
reduced_data[lower+"_"+upper] = resolved_max(zone)
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lat = []
lon = []
val = []
for y in reduced_data['2000_2001']:
lon.append(y['longitude'])
lat.append(y['latitude'])
val.append(y['tmax'])
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.bwr)
# add colorbar.
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Max Temperature (in Celcius)')
plt.title('Year 2000')
plt.savefig('2a_2000.png')
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lat = []
lon = []
val = []
for y in reduced_data['2001_2002']:
lon.append(y['longitude'])
lat.append(y['latitude'])
val.append(y['tmax'])
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.coolwarm)
# add colorbar.
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Max Temperature (in Celcius)')
plt.title('Year 2001')
plt.savefig('2a_2001.png')
# Part 2b
def make_weather_trainers(trainRatio,
estimator_gridbuilders,
metricName=None):
"""Construct a list of TrainValidationSplit estimators for weather data
where `estimator_gridbuilders` is a list of (Estimator, ParamGridBuilder) tuples
and 0 < `trainRatio` <= 1 determines the fraction of rows used for training.
The RegressionEvaluator will use a non-default `metricName`, if specified.
"""
feature_cols = ['latitude', 'longitude', 'elevation']
column_names = dict(featuresCol="features",
labelCol="tmax",
predictionCol="tmax_pred")
feature_assembler = VectorAssembler(
inputCols=feature_cols,
outputCol=column_names["featuresCol"])
ev = (RegressionEvaluator()
.setLabelCol(column_names["labelCol"])
.setPredictionCol(column_names["predictionCol"])
)
if metricName:
ev = ev.setMetricName(metricName)
tvs_list = []
for est, pgb in estimator_gridbuilders:
est = est.setParams(**column_names)
pl = Pipeline(stages=[feature_assembler, est])
paramGrid = pgb.build()
tvs_list.append(TrainValidationSplit(estimator=pl,
estimatorParamMaps=paramGrid,
evaluator=ev,
trainRatio=trainRatio))
return tvs_list
def get_best_weather_model(data):
train, test = data.randomSplit([0.75, 0.25])
train = train.cache()
test = test.cache()
# e.g., use print(LinearRegression().explainParams()) to see what can be tuned
estimator_gridbuilders = [
estimator_gridbuilder(
LinearRegression(),
dict(regParam=[0.3, 0.6],
elasticNetParam=[0, 0.5],
maxIter=[10, 20]
)),
estimator_gridbuilder(
GBTRegressor(),
dict(lossType=["squared"],
maxDepth=[5, 10],
maxIter=[2, 5],
stepSize=[0.1]
)),
estimator_gridbuilder(
RandomForestRegressor(),
dict(numTrees=[5, 10],
maxDepth=[5, 15],
featureSubsetStrategy=["auto"]
))
]
metricName = 'r2'
tvs_list = make_weather_trainers(.2, # fraction of data for training
estimator_gridbuilders,
metricName)
ev = tvs_list[0].getEvaluator()
scorescale = 1 if ev.isLargerBetter() else -1
model_name_scores = []
for tvs in tvs_list:
model = tvs.fit(train)
test_pred = model.transform(test)
score = ev.evaluate(test_pred) * scorescale
model_name_scores.append((model, get_estimator_name(tvs.getEstimator()), score))
best_model, best_name, best_score = max(model_name_scores, key=lambda triplet: triplet[2])
print("\n\nBest model is %s with validation data %s score %f" % (best_name, ev.getMetricName(), best_score*scorescale))
return best_model
fortrain, holdout = data.randomSplit([0.75, 0.25])
model = get_best_weather_model(fortrain)
print("\n\n\nBest parameters on test data:\n", get_best_tvs_model_params(model))
# Part 2b1
import elevation_grid as eg
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
import numpy as np
lat_range = range(-90, 90, 1)
lon_range = range(-180, 180, 1)
combo = []
for lat in lat_range:
for lon in lon_range:
elev = eg.get_elevation(lat, lon)
combo.append((lat, lon, float(elev)))
dataset = spark.createDataFrame(combo,["latitude", "longitude", "elevation"])
pred = model.transform(dataset).collect()
collected_predictions = list(map(lambda row: row.asDict(), pred))
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lon = []
lat = []
val = []
for y in collected_predictions:
lon.append(y['longitude'])
lat.append(y['latitude'])
val.append(y['tmax_pred'])
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.coolwarm)
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Max Temperature (in Celcius)')
plt.title('Predicted Heat Map')
plt.savefig('2b1_heat.png')
# Part 2b2
pred = model.transform(holdout).collect()
collected_predictions = list(map(lambda row: row.asDict(), pred))
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lon = []
lat = []
val = []
for y in collected_predictions:
lon.append(y['longitude'])
lat.append(y['latitude'])
val.append(abs(y['tmax_pred'] - y['tmax']))
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.Reds)
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Absolute Temperature Difference (in Celcius)')
plt.title('Regression Error Map')
plt.savefig('2b2_regression_error.png')
|
normal
|
{
"blob_id": "3852ff2f3f4ac889256bd5f4e36a86d483857cef",
"index": 6534,
"step-1": "<mask token>\n\n\ndef get_data(inputloc, tablename='data'):\n data = spark.read.csv(inputloc, schema=schema)\n data.createOrReplaceTempView(tablename)\n return data\n\n\n<mask token>\n\n\ndef resolved_max(df):\n df_max = df.groupBy('station').agg({'date': 'max'}).select(functions.\n col('station'), functions.col('max(date)').alias('d_max'))\n d_max = df.join(df_max, 'station').where(functions.col('d_max') ==\n functions.col('date'))\n fin_ret = d_max.select(functions.col('latitude'), functions.col(\n 'longitude'), functions.col('tmax'), functions.col('station'))\n return list(map(lambda row: row.asDict(), fin_ret.collect()))\n\n\n<mask token>\n\n\ndef make_weather_trainers(trainRatio, estimator_gridbuilders, metricName=None):\n \"\"\"Construct a list of TrainValidationSplit estimators for weather data\n where `estimator_gridbuilders` is a list of (Estimator, ParamGridBuilder) tuples\n and 0 < `trainRatio` <= 1 determines the fraction of rows used for training.\n The RegressionEvaluator will use a non-default `metricName`, if specified.\n \"\"\"\n feature_cols = ['latitude', 'longitude', 'elevation']\n column_names = dict(featuresCol='features', labelCol='tmax',\n predictionCol='tmax_pred')\n feature_assembler = VectorAssembler(inputCols=feature_cols, outputCol=\n column_names['featuresCol'])\n ev = RegressionEvaluator().setLabelCol(column_names['labelCol']\n ).setPredictionCol(column_names['predictionCol'])\n if metricName:\n ev = ev.setMetricName(metricName)\n tvs_list = []\n for est, pgb in estimator_gridbuilders:\n est = est.setParams(**column_names)\n pl = Pipeline(stages=[feature_assembler, est])\n paramGrid = pgb.build()\n tvs_list.append(TrainValidationSplit(estimator=pl,\n estimatorParamMaps=paramGrid, evaluator=ev, trainRatio=trainRatio))\n return tvs_list\n\n\ndef get_best_weather_model(data):\n train, test = data.randomSplit([0.75, 0.25])\n train = train.cache()\n test = test.cache()\n estimator_gridbuilders = [estimator_gridbuilder(LinearRegression(),\n dict(regParam=[0.3, 0.6], elasticNetParam=[0, 0.5], maxIter=[10, 20\n ])), estimator_gridbuilder(GBTRegressor(), dict(lossType=['squared'\n ], maxDepth=[5, 10], maxIter=[2, 5], stepSize=[0.1])),\n estimator_gridbuilder(RandomForestRegressor(), dict(numTrees=[5, 10\n ], maxDepth=[5, 15], featureSubsetStrategy=['auto']))]\n metricName = 'r2'\n tvs_list = make_weather_trainers(0.2, estimator_gridbuilders, metricName)\n ev = tvs_list[0].getEvaluator()\n scorescale = 1 if ev.isLargerBetter() else -1\n model_name_scores = []\n for tvs in tvs_list:\n model = tvs.fit(train)\n test_pred = model.transform(test)\n score = ev.evaluate(test_pred) * scorescale\n model_name_scores.append((model, get_estimator_name(tvs.\n getEstimator()), score))\n best_model, best_name, best_score = max(model_name_scores, key=lambda\n triplet: triplet[2])\n print('\\n\\nBest model is %s with validation data %s score %f' % (\n best_name, ev.getMetricName(), best_score * scorescale))\n return best_model\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data(inputloc, tablename='data'):\n data = spark.read.csv(inputloc, schema=schema)\n data.createOrReplaceTempView(tablename)\n return data\n\n\n<mask token>\n\n\ndef resolved_max(df):\n df_max = df.groupBy('station').agg({'date': 'max'}).select(functions.\n col('station'), functions.col('max(date)').alias('d_max'))\n d_max = df.join(df_max, 'station').where(functions.col('d_max') ==\n functions.col('date'))\n fin_ret = d_max.select(functions.col('latitude'), functions.col(\n 'longitude'), functions.col('tmax'), functions.col('station'))\n return list(map(lambda row: row.asDict(), fin_ret.collect()))\n\n\nfor i in range(0, len(years) - 1):\n lower = years[i]\n upper = years[i + 1]\n zone = data.filter(functions.col('date') < upper).filter(functions.col(\n 'date') >= lower)\n reduced_data[lower + '_' + upper] = resolved_max(zone)\n<mask token>\nplt.figure(figsize=(16, 12))\n<mask token>\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n<mask token>\nfor y in reduced_data['2000_2001']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\n<mask token>\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2000')\nplt.savefig('2a_2000.png')\nplt.figure(figsize=(16, 12))\n<mask token>\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n<mask token>\nfor y in reduced_data['2001_2002']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\n<mask token>\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2001')\nplt.savefig('2a_2001.png')\n\n\ndef make_weather_trainers(trainRatio, estimator_gridbuilders, metricName=None):\n \"\"\"Construct a list of TrainValidationSplit estimators for weather data\n where `estimator_gridbuilders` is a list of (Estimator, ParamGridBuilder) tuples\n and 0 < `trainRatio` <= 1 determines the fraction of rows used for training.\n The RegressionEvaluator will use a non-default `metricName`, if specified.\n \"\"\"\n feature_cols = ['latitude', 'longitude', 'elevation']\n column_names = dict(featuresCol='features', labelCol='tmax',\n predictionCol='tmax_pred')\n feature_assembler = VectorAssembler(inputCols=feature_cols, outputCol=\n column_names['featuresCol'])\n ev = RegressionEvaluator().setLabelCol(column_names['labelCol']\n ).setPredictionCol(column_names['predictionCol'])\n if metricName:\n ev = ev.setMetricName(metricName)\n tvs_list = []\n for est, pgb in estimator_gridbuilders:\n est = est.setParams(**column_names)\n pl = Pipeline(stages=[feature_assembler, est])\n paramGrid = pgb.build()\n tvs_list.append(TrainValidationSplit(estimator=pl,\n estimatorParamMaps=paramGrid, evaluator=ev, trainRatio=trainRatio))\n return tvs_list\n\n\ndef get_best_weather_model(data):\n train, test = data.randomSplit([0.75, 0.25])\n train = train.cache()\n test = test.cache()\n estimator_gridbuilders = [estimator_gridbuilder(LinearRegression(),\n dict(regParam=[0.3, 0.6], elasticNetParam=[0, 0.5], maxIter=[10, 20\n ])), estimator_gridbuilder(GBTRegressor(), dict(lossType=['squared'\n ], maxDepth=[5, 10], maxIter=[2, 5], stepSize=[0.1])),\n estimator_gridbuilder(RandomForestRegressor(), dict(numTrees=[5, 10\n ], maxDepth=[5, 15], 
featureSubsetStrategy=['auto']))]\n metricName = 'r2'\n tvs_list = make_weather_trainers(0.2, estimator_gridbuilders, metricName)\n ev = tvs_list[0].getEvaluator()\n scorescale = 1 if ev.isLargerBetter() else -1\n model_name_scores = []\n for tvs in tvs_list:\n model = tvs.fit(train)\n test_pred = model.transform(test)\n score = ev.evaluate(test_pred) * scorescale\n model_name_scores.append((model, get_estimator_name(tvs.\n getEstimator()), score))\n best_model, best_name, best_score = max(model_name_scores, key=lambda\n triplet: triplet[2])\n print('\\n\\nBest model is %s with validation data %s score %f' % (\n best_name, ev.getMetricName(), best_score * scorescale))\n return best_model\n\n\n<mask token>\nprint(\"\"\"\n\n\nBest parameters on test data:\n\"\"\", get_best_tvs_model_params(model)\n )\n<mask token>\nfor lat in lat_range:\n for lon in lon_range:\n elev = eg.get_elevation(lat, lon)\n combo.append((lat, lon, float(elev)))\n<mask token>\nplt.figure(figsize=(16, 12))\n<mask token>\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n<mask token>\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax_pred'])\n<mask token>\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Predicted Heat Map')\nplt.savefig('2b1_heat.png')\n<mask token>\nplt.figure(figsize=(16, 12))\n<mask token>\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n<mask token>\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(abs(y['tmax_pred'] - y['tmax']))\n<mask token>\ncbar.set_label('Absolute Temperature Difference (in Celcius)')\nplt.title('Regression Error Map')\nplt.savefig('2b2_regression_error.png')\n",
"step-3": "<mask token>\nschema = StructType([StructField('station', StringType(), False),\n StructField('date', DateType(), False), StructField('latitude',\n FloatType(), False), StructField('longitude', FloatType(), False),\n StructField('elevation', FloatType(), False), StructField('tmax',\n FloatType(), False)])\n\n\ndef get_data(inputloc, tablename='data'):\n data = spark.read.csv(inputloc, schema=schema)\n data.createOrReplaceTempView(tablename)\n return data\n\n\ninput_loc = 'tmax-2'\ndata = get_data(input_loc)\nyears = ['2000', '2001', '2002', '2003']\nreduced_data = dict()\n\n\ndef resolved_max(df):\n df_max = df.groupBy('station').agg({'date': 'max'}).select(functions.\n col('station'), functions.col('max(date)').alias('d_max'))\n d_max = df.join(df_max, 'station').where(functions.col('d_max') ==\n functions.col('date'))\n fin_ret = d_max.select(functions.col('latitude'), functions.col(\n 'longitude'), functions.col('tmax'), functions.col('station'))\n return list(map(lambda row: row.asDict(), fin_ret.collect()))\n\n\nfor i in range(0, len(years) - 1):\n lower = years[i]\n upper = years[i + 1]\n zone = data.filter(functions.col('date') < upper).filter(functions.col(\n 'date') >= lower)\n reduced_data[lower + '_' + upper] = resolved_max(zone)\n<mask token>\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlat = []\nlon = []\nval = []\nfor y in reduced_data['2000_2001']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', cmap=cm.bwr)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2000')\nplt.savefig('2a_2000.png')\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlat = []\nlon = []\nval = []\nfor y in reduced_data['2001_2002']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', cmap=cm.coolwarm)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2001')\nplt.savefig('2a_2001.png')\n\n\ndef make_weather_trainers(trainRatio, estimator_gridbuilders, metricName=None):\n \"\"\"Construct a list of TrainValidationSplit estimators for weather data\n where `estimator_gridbuilders` is a list of (Estimator, ParamGridBuilder) tuples\n and 0 < `trainRatio` <= 1 determines the fraction of rows used for training.\n The RegressionEvaluator will use a non-default `metricName`, if specified.\n \"\"\"\n feature_cols = ['latitude', 'longitude', 'elevation']\n column_names = dict(featuresCol='features', labelCol='tmax',\n predictionCol='tmax_pred')\n feature_assembler = VectorAssembler(inputCols=feature_cols, outputCol=\n column_names['featuresCol'])\n ev = RegressionEvaluator().setLabelCol(column_names['labelCol']\n ).setPredictionCol(column_names['predictionCol'])\n if metricName:\n ev = 
ev.setMetricName(metricName)\n tvs_list = []\n for est, pgb in estimator_gridbuilders:\n est = est.setParams(**column_names)\n pl = Pipeline(stages=[feature_assembler, est])\n paramGrid = pgb.build()\n tvs_list.append(TrainValidationSplit(estimator=pl,\n estimatorParamMaps=paramGrid, evaluator=ev, trainRatio=trainRatio))\n return tvs_list\n\n\ndef get_best_weather_model(data):\n train, test = data.randomSplit([0.75, 0.25])\n train = train.cache()\n test = test.cache()\n estimator_gridbuilders = [estimator_gridbuilder(LinearRegression(),\n dict(regParam=[0.3, 0.6], elasticNetParam=[0, 0.5], maxIter=[10, 20\n ])), estimator_gridbuilder(GBTRegressor(), dict(lossType=['squared'\n ], maxDepth=[5, 10], maxIter=[2, 5], stepSize=[0.1])),\n estimator_gridbuilder(RandomForestRegressor(), dict(numTrees=[5, 10\n ], maxDepth=[5, 15], featureSubsetStrategy=['auto']))]\n metricName = 'r2'\n tvs_list = make_weather_trainers(0.2, estimator_gridbuilders, metricName)\n ev = tvs_list[0].getEvaluator()\n scorescale = 1 if ev.isLargerBetter() else -1\n model_name_scores = []\n for tvs in tvs_list:\n model = tvs.fit(train)\n test_pred = model.transform(test)\n score = ev.evaluate(test_pred) * scorescale\n model_name_scores.append((model, get_estimator_name(tvs.\n getEstimator()), score))\n best_model, best_name, best_score = max(model_name_scores, key=lambda\n triplet: triplet[2])\n print('\\n\\nBest model is %s with validation data %s score %f' % (\n best_name, ev.getMetricName(), best_score * scorescale))\n return best_model\n\n\nfortrain, holdout = data.randomSplit([0.75, 0.25])\nmodel = get_best_weather_model(fortrain)\nprint(\"\"\"\n\n\nBest parameters on test data:\n\"\"\", get_best_tvs_model_params(model)\n )\n<mask token>\nlat_range = range(-90, 90, 1)\nlon_range = range(-180, 180, 1)\ncombo = []\nfor lat in lat_range:\n for lon in lon_range:\n elev = eg.get_elevation(lat, lon)\n combo.append((lat, lon, float(elev)))\ndataset = spark.createDataFrame(combo, ['latitude', 'longitude', 'elevation'])\npred = model.transform(dataset).collect()\ncollected_predictions = list(map(lambda row: row.asDict(), pred))\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlon = []\nlat = []\nval = []\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax_pred'])\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', cmap=cm.coolwarm)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Predicted Heat Map')\nplt.savefig('2b1_heat.png')\npred = model.transform(holdout).collect()\ncollected_predictions = list(map(lambda row: row.asDict(), pred))\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlon = []\nlat = []\nval = []\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(abs(y['tmax_pred'] - y['tmax']))\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', 
cmap=cm.Reds)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Absolute Temperature Difference (in Celcius)')\nplt.title('Regression Error Map')\nplt.savefig('2b2_regression_error.png')\n",
"step-4": "from pyspark.sql import SparkSession, Row, functions, Column\nfrom pyspark.sql.types import *\nfrom pyspark.ml import Pipeline, Estimator\nfrom pyspark.ml.feature import SQLTransformer, VectorAssembler\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder\nfrom pyspark.ml.regression import LinearRegression, GBTRegressor, RandomForestRegressor, DecisionTreeRegressor\nimport sys\nfrom weather_tools_mv import *\nschema = StructType([StructField('station', StringType(), False),\n StructField('date', DateType(), False), StructField('latitude',\n FloatType(), False), StructField('longitude', FloatType(), False),\n StructField('elevation', FloatType(), False), StructField('tmax',\n FloatType(), False)])\n\n\ndef get_data(inputloc, tablename='data'):\n data = spark.read.csv(inputloc, schema=schema)\n data.createOrReplaceTempView(tablename)\n return data\n\n\ninput_loc = 'tmax-2'\ndata = get_data(input_loc)\nyears = ['2000', '2001', '2002', '2003']\nreduced_data = dict()\n\n\ndef resolved_max(df):\n df_max = df.groupBy('station').agg({'date': 'max'}).select(functions.\n col('station'), functions.col('max(date)').alias('d_max'))\n d_max = df.join(df_max, 'station').where(functions.col('d_max') ==\n functions.col('date'))\n fin_ret = d_max.select(functions.col('latitude'), functions.col(\n 'longitude'), functions.col('tmax'), functions.col('station'))\n return list(map(lambda row: row.asDict(), fin_ret.collect()))\n\n\nfor i in range(0, len(years) - 1):\n lower = years[i]\n upper = years[i + 1]\n zone = data.filter(functions.col('date') < upper).filter(functions.col(\n 'date') >= lower)\n reduced_data[lower + '_' + upper] = resolved_max(zone)\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.cm as cm\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlat = []\nlon = []\nval = []\nfor y in reduced_data['2000_2001']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', cmap=cm.bwr)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2000')\nplt.savefig('2a_2000.png')\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlat = []\nlon = []\nval = []\nfor y in reduced_data['2001_2002']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', cmap=cm.coolwarm)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2001')\nplt.savefig('2a_2001.png')\n\n\ndef make_weather_trainers(trainRatio, estimator_gridbuilders, metricName=None):\n \"\"\"Construct a list of TrainValidationSplit estimators for weather data\n where `estimator_gridbuilders` is a list of (Estimator, ParamGridBuilder) 
tuples\n and 0 < `trainRatio` <= 1 determines the fraction of rows used for training.\n The RegressionEvaluator will use a non-default `metricName`, if specified.\n \"\"\"\n feature_cols = ['latitude', 'longitude', 'elevation']\n column_names = dict(featuresCol='features', labelCol='tmax',\n predictionCol='tmax_pred')\n feature_assembler = VectorAssembler(inputCols=feature_cols, outputCol=\n column_names['featuresCol'])\n ev = RegressionEvaluator().setLabelCol(column_names['labelCol']\n ).setPredictionCol(column_names['predictionCol'])\n if metricName:\n ev = ev.setMetricName(metricName)\n tvs_list = []\n for est, pgb in estimator_gridbuilders:\n est = est.setParams(**column_names)\n pl = Pipeline(stages=[feature_assembler, est])\n paramGrid = pgb.build()\n tvs_list.append(TrainValidationSplit(estimator=pl,\n estimatorParamMaps=paramGrid, evaluator=ev, trainRatio=trainRatio))\n return tvs_list\n\n\ndef get_best_weather_model(data):\n train, test = data.randomSplit([0.75, 0.25])\n train = train.cache()\n test = test.cache()\n estimator_gridbuilders = [estimator_gridbuilder(LinearRegression(),\n dict(regParam=[0.3, 0.6], elasticNetParam=[0, 0.5], maxIter=[10, 20\n ])), estimator_gridbuilder(GBTRegressor(), dict(lossType=['squared'\n ], maxDepth=[5, 10], maxIter=[2, 5], stepSize=[0.1])),\n estimator_gridbuilder(RandomForestRegressor(), dict(numTrees=[5, 10\n ], maxDepth=[5, 15], featureSubsetStrategy=['auto']))]\n metricName = 'r2'\n tvs_list = make_weather_trainers(0.2, estimator_gridbuilders, metricName)\n ev = tvs_list[0].getEvaluator()\n scorescale = 1 if ev.isLargerBetter() else -1\n model_name_scores = []\n for tvs in tvs_list:\n model = tvs.fit(train)\n test_pred = model.transform(test)\n score = ev.evaluate(test_pred) * scorescale\n model_name_scores.append((model, get_estimator_name(tvs.\n getEstimator()), score))\n best_model, best_name, best_score = max(model_name_scores, key=lambda\n triplet: triplet[2])\n print('\\n\\nBest model is %s with validation data %s score %f' % (\n best_name, ev.getMetricName(), best_score * scorescale))\n return best_model\n\n\nfortrain, holdout = data.randomSplit([0.75, 0.25])\nmodel = get_best_weather_model(fortrain)\nprint(\"\"\"\n\n\nBest parameters on test data:\n\"\"\", get_best_tvs_model_params(model)\n )\nimport elevation_grid as eg\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import VectorAssembler\nimport numpy as np\nlat_range = range(-90, 90, 1)\nlon_range = range(-180, 180, 1)\ncombo = []\nfor lat in lat_range:\n for lon in lon_range:\n elev = eg.get_elevation(lat, lon)\n combo.append((lat, lon, float(elev)))\ndataset = spark.createDataFrame(combo, ['latitude', 'longitude', 'elevation'])\npred = model.transform(dataset).collect()\ncollected_predictions = list(map(lambda row: row.asDict(), pred))\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlon = []\nlat = []\nval = []\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax_pred'])\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', cmap=cm.coolwarm)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Predicted Heat 
Map')\nplt.savefig('2b1_heat.png')\npred = model.transform(holdout).collect()\ncollected_predictions = list(map(lambda row: row.asDict(), pred))\nplt.figure(figsize=(16, 12))\neq_map = Basemap(projection='cyl', resolution='l', area_thresh=1000.0,\n lat_0=0, lon_0=0)\neq_map.fillcontinents(color='#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\nlon = []\nlat = []\nval = []\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(abs(y['tmax_pred'] - y['tmax']))\nx, y = eq_map(lon, lat)\ncs = eq_map.scatter(x, y, c=val, marker='o', cmap=cm.Reds)\ncbar = eq_map.colorbar(cs, location='bottom', pad='5%')\ncbar.set_label('Absolute Temperature Difference (in Celcius)')\nplt.title('Regression Error Map')\nplt.savefig('2b2_regression_error.png')\n",
"step-5": "from pyspark.sql import SparkSession, Row, functions, Column\nfrom pyspark.sql.types import *\n\nfrom pyspark.ml import Pipeline, Estimator\nfrom pyspark.ml.feature import SQLTransformer, VectorAssembler\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder\nfrom pyspark.ml.regression import (LinearRegression,\n GBTRegressor,\n RandomForestRegressor,\n DecisionTreeRegressor)\n\nimport sys\nfrom weather_tools_mv import *\n\nschema = StructType([\n StructField('station', StringType(), False),\n StructField('date', DateType(), False),\n # StructField('dayofyear', IntegerType(), False),\n StructField('latitude', FloatType(), False),\n StructField('longitude', FloatType(), False),\n StructField('elevation', FloatType(), False),\n StructField('tmax', FloatType(), False),\n])\n\ndef get_data(inputloc, tablename='data'):\n data = spark.read.csv(inputloc, schema=schema)\n data.createOrReplaceTempView(tablename)\n return data\n\ninput_loc = 'tmax-2'\ndata = get_data(input_loc)\n\n#Part 2a\n\n# years = list(map(lambda x: str(x), range(2000, 2018)))\n\nyears = ['2000', '2001', '2002', '2003']\n\nreduced_data = dict()\n\ndef resolved_max(df):\n df_max = df.groupBy('station').agg({'date': 'max'}).select(functions.col('station'),\n functions.col('max(date)').alias('d_max'))\n \n d_max = df.join(df_max, 'station').where(functions.col('d_max') == functions.col('date'))\n \n fin_ret = d_max.select(functions.col('latitude'),\n functions.col('longitude'),\n functions.col('tmax'),\n functions.col('station'))\n \n return list(map(lambda row: row.asDict(), fin_ret.collect()))\n\nfor i in range(0, len(years) - 1):\n lower = years[i]\n upper = years[i+1]\n zone = data.filter(functions.col('date') < upper).filter(functions.col('date') >= lower)\n reduced_data[lower+\"_\"+upper] = resolved_max(zone)\n\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.cm as cm\n\nplt.figure(figsize=(16,12))\n\neq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,\n lat_0=0, lon_0=0)\n# eq_map.drawcoastlines()\n# eq_map.drawcountries()\neq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n\nlat = []\nlon = []\nval = []\n\nfor y in reduced_data['2000_2001']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\n\nx, y = eq_map(lon, lat)\n\ncs = eq_map.scatter(x, y, c=val, marker=\"o\", cmap=cm.bwr)\n# add colorbar.\ncbar = eq_map.colorbar(cs,location='bottom',pad=\"5%\")\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2000')\nplt.savefig('2a_2000.png')\n\n\nplt.figure(figsize=(16,12))\n\neq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,\n lat_0=0, lon_0=0)\n# eq_map.drawcoastlines()\n# eq_map.drawcountries()\neq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n\nlat = []\nlon = []\nval = []\n\nfor y in reduced_data['2001_2002']:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax'])\n\nx, y = eq_map(lon, lat)\n\ncs = eq_map.scatter(x, y, c=val, marker=\"o\", cmap=cm.coolwarm)\n# add colorbar.\ncbar = 
eq_map.colorbar(cs,location='bottom',pad=\"5%\")\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Year 2001')\nplt.savefig('2a_2001.png')\n\n\n\n\n\n# Part 2b\n\n\ndef make_weather_trainers(trainRatio,\n estimator_gridbuilders,\n metricName=None):\n \"\"\"Construct a list of TrainValidationSplit estimators for weather data\n where `estimator_gridbuilders` is a list of (Estimator, ParamGridBuilder) tuples\n and 0 < `trainRatio` <= 1 determines the fraction of rows used for training.\n The RegressionEvaluator will use a non-default `metricName`, if specified.\n \"\"\"\n feature_cols = ['latitude', 'longitude', 'elevation']\n column_names = dict(featuresCol=\"features\",\n labelCol=\"tmax\",\n predictionCol=\"tmax_pred\")\n\n feature_assembler = VectorAssembler(\n inputCols=feature_cols,\n outputCol=column_names[\"featuresCol\"])\n ev = (RegressionEvaluator()\n .setLabelCol(column_names[\"labelCol\"])\n .setPredictionCol(column_names[\"predictionCol\"])\n )\n if metricName:\n ev = ev.setMetricName(metricName)\n tvs_list = []\n for est, pgb in estimator_gridbuilders:\n est = est.setParams(**column_names)\n\n pl = Pipeline(stages=[feature_assembler, est])\n\n paramGrid = pgb.build()\n tvs_list.append(TrainValidationSplit(estimator=pl,\n estimatorParamMaps=paramGrid,\n evaluator=ev,\n trainRatio=trainRatio))\n return tvs_list\n\ndef get_best_weather_model(data):\n train, test = data.randomSplit([0.75, 0.25])\n train = train.cache()\n test = test.cache()\n\n # e.g., use print(LinearRegression().explainParams()) to see what can be tuned\n estimator_gridbuilders = [\n estimator_gridbuilder(\n LinearRegression(),\n dict(regParam=[0.3, 0.6],\n elasticNetParam=[0, 0.5],\n maxIter=[10, 20]\n )),\n\n estimator_gridbuilder(\n GBTRegressor(),\n dict(lossType=[\"squared\"],\n maxDepth=[5, 10],\n maxIter=[2, 5],\n stepSize=[0.1]\n )),\n\n estimator_gridbuilder(\n RandomForestRegressor(),\n dict(numTrees=[5, 10],\n maxDepth=[5, 15],\n featureSubsetStrategy=[\"auto\"]\n ))\n ]\n \n metricName = 'r2'\n tvs_list = make_weather_trainers(.2, # fraction of data for training\n estimator_gridbuilders,\n metricName)\n ev = tvs_list[0].getEvaluator()\n scorescale = 1 if ev.isLargerBetter() else -1\n model_name_scores = []\n for tvs in tvs_list:\n model = tvs.fit(train)\n test_pred = model.transform(test)\n score = ev.evaluate(test_pred) * scorescale\n model_name_scores.append((model, get_estimator_name(tvs.getEstimator()), score))\n \n best_model, best_name, best_score = max(model_name_scores, key=lambda triplet: triplet[2])\n print(\"\\n\\nBest model is %s with validation data %s score %f\" % (best_name, ev.getMetricName(), best_score*scorescale))\n return best_model\n\nfortrain, holdout = data.randomSplit([0.75, 0.25])\n\nmodel = get_best_weather_model(fortrain)\nprint(\"\\n\\n\\nBest parameters on test data:\\n\", get_best_tvs_model_params(model))\n\n\n# Part 2b1\n\nimport elevation_grid as eg\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import VectorAssembler\nimport numpy as np\n\nlat_range = range(-90, 90, 1)\nlon_range = range(-180, 180, 1)\n\ncombo = []\n\nfor lat in lat_range:\n for lon in lon_range:\n elev = eg.get_elevation(lat, lon)\n combo.append((lat, lon, float(elev)))\n\ndataset = spark.createDataFrame(combo,[\"latitude\", \"longitude\", \"elevation\"])\npred = model.transform(dataset).collect()\n\ncollected_predictions = list(map(lambda row: row.asDict(), pred))\n\nplt.figure(figsize=(16,12))\n\neq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,\n 
lat_0=0, lon_0=0)\n# eq_map.drawcoastlines()\n# eq_map.drawcountries()\neq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n\nlon = []\nlat = []\nval = []\n\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(y['tmax_pred'])\n\nx, y = eq_map(lon, lat)\n\ncs = eq_map.scatter(x, y, c=val, marker=\"o\", cmap=cm.coolwarm)\ncbar = eq_map.colorbar(cs,location='bottom',pad=\"5%\")\ncbar.set_label('Max Temperature (in Celcius)')\nplt.title('Predicted Heat Map')\nplt.savefig('2b1_heat.png')\n\n\n# Part 2b2\n\npred = model.transform(holdout).collect()\n\ncollected_predictions = list(map(lambda row: row.asDict(), pred))\n\nplt.figure(figsize=(16,12))\n\neq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,\n lat_0=0, lon_0=0)\n# eq_map.drawcoastlines()\n# eq_map.drawcountries()\neq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)\neq_map.drawmapboundary(fill_color='#3b3b3b')\neq_map.drawmeridians(np.arange(0, 360, 30))\neq_map.drawparallels(np.arange(-90, 90, 30))\n\nlon = []\nlat = []\nval = []\n\nfor y in collected_predictions:\n lon.append(y['longitude'])\n lat.append(y['latitude'])\n val.append(abs(y['tmax_pred'] - y['tmax']))\n\nx, y = eq_map(lon, lat)\n\ncs = eq_map.scatter(x, y, c=val, marker=\"o\", cmap=cm.Reds)\ncbar = eq_map.colorbar(cs,location='bottom',pad=\"5%\")\ncbar.set_label('Absolute Temperature Difference (in Celcius)')\nplt.title('Regression Error Map')\nplt.savefig('2b2_regression_error.png')",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import json
import os
from six import iteritems
from ..exceptions import ColinConfigException
from ..constant import CONFIG_DIRECTORY, JSON
from ..loader import load_check_implementation
from ..target import is_compatible
class Config(object):
def __init__(self, name=None):
"""
Load config for colin.
        :param name: str (name of the config file (without .json), default is "default")
"""
self.name = name or "default"
config_path = os.path.join(get_config_directory(), self.name + JSON)
try:
with open(config_path, mode='r') as config_file:
self.config_dict = json.load(config_file)
except Exception as ex:
raise ColinConfigException("Config file '{}' cannot be loaded.".format(config_path))
def get_checks(self, target_type, group=None, severity=None, tags=None):
"""
Get all checks for given type/group/severity/tags.
:param target_type: TargetType enum
:param group: str (if not group, get checks from all groups/directories)
:param severity: str (optional x required)
:param tags: list of str
:return: list of check instances
"""
check_files = self._get_check_files(group=group,
severity=severity)
groups = {}
for (group, check_files) in iteritems(check_files):
checks = []
for severity, check_file in check_files:
check_classes = load_check_implementation(path=check_file, severity=severity)
for check_class in check_classes:
if is_compatible(target_type, check_class, severity, tags):
checks.append(check_class)
groups[group] = checks
return groups
@staticmethod
def get_check_file(group, name):
"""
Get the check file from given group with given name.
:param group: str
:param name: str
:return: str (path)
"""
return os.path.join(get_checks_path(), group, name + ".py")
@staticmethod
def get_check_files(group, names, severity):
"""
Get the check files from given group with given names.
:param severity: str
:param group: str
:param names: list of str
:return: list of str (paths)
"""
check_files = []
for f in names:
check_file = Config.get_check_file(group=group,
name=f)
check_files.append((severity, check_file))
return check_files
def _get_check_groups(self, group=None):
"""
Get check group to validate
:param group: str (if None, all from the config will be used)
:return: list of str (group names)
"""
groups = [g for g in self.config_dict]
if group:
if group in groups:
check_groups = [group]
else:
check_groups = []
else:
check_groups = groups
return check_groups
def _get_check_files(self, group=None, severity=None):
"""
Get file names with checks filtered by group and severity.
:param group: str (if None, all groups will be used)
:param severity: str (if None, all severities will be used)
:return: list of str (absolute paths)
"""
groups = {}
for g in self._get_check_groups(group):
check_files = []
for sev, files in iteritems(self.config_dict[g]):
if (not severity) or severity == sev:
check_files += Config.get_check_files(group=g,
names=files,
severity=sev)
groups[g] = check_files
return groups
def get_checks_path():
"""
Get path to checks.
:return: str (absolute path of directory with checks)
"""
rel_path = os.path.join(os.pardir, os.pardir, os.pardir, "checks")
return os.path.abspath(os.path.join(__file__, rel_path))
def get_config_directory():
"""
Get the directory with config files
:return: str
"""
local_share = os.path.join(os.path.expanduser("~"),
".local",
CONFIG_DIRECTORY)
if os.path.isdir(local_share) and os.path.exists(local_share):
return local_share
usr_local_share = os.path.join("/usr/local", CONFIG_DIRECTORY)
if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):
return usr_local_share
raise ColinConfigException("Config directory cannot be found.")
|
normal
|
{
"blob_id": "7bb9455e6f0c15ab0be6963cff06ff41df73e6e0",
"index": 2583,
"step-1": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n <mask token>\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\ndef get_config_directory():\n \"\"\"\n Get the directory with config files\n\n :return: str\n \"\"\"\n local_share = os.path.join(os.path.expanduser('~'), '.local',\n CONFIG_DIRECTORY)\n if os.path.isdir(local_share) and os.path.exists(local_share):\n return local_share\n usr_local_share = os.path.join('/usr/local', 
CONFIG_DIRECTORY)\n if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):\n return usr_local_share\n raise ColinConfigException('Config directory cannot be found.')\n",
"step-5": "import json\nimport os\n\nfrom six import iteritems\n\nfrom ..exceptions import ColinConfigException\nfrom ..constant import CONFIG_DIRECTORY, JSON\nfrom ..loader import load_check_implementation\nfrom ..target import is_compatible\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group,\n severity=severity)\n groups = {}\n for (group, check_files) in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n\n check_classes = load_check_implementation(path=check_file, severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + \".py\")\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group,\n name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if (not severity) or severity == sev:\n check_files += Config.get_check_files(group=g,\n names=files,\n severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, \"checks\")\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\ndef get_config_directory():\n \"\"\"\n Get the directory with config files\n\n :return: str\n 
\"\"\"\n local_share = os.path.join(os.path.expanduser(\"~\"),\n \".local\",\n CONFIG_DIRECTORY)\n if os.path.isdir(local_share) and os.path.exists(local_share):\n return local_share\n\n usr_local_share = os.path.join(\"/usr/local\", CONFIG_DIRECTORY)\n if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):\n return usr_local_share\n\n raise ColinConfigException(\"Config directory cannot be found.\")\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
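
A brief usage sketch for the Config class in the record above. The group name, the severity string, and the TargetType member are illustrative assumptions; the docstrings only say that target_type is a TargetType enum and that groups and severities come from the loaded JSON config.

# Illustrative sketch only: the import path, "containers", "required", and
# TargetType.CONTAINER are assumed names, not confirmed against colin itself.
from colin.core.target import TargetType  # assumed import path

config = Config()  # loads <config dir>/default.json via get_config_directory()
groups = config.get_checks(target_type=TargetType.CONTAINER,
                           group="containers",
                           severity="required")
for group_name, checks in groups.items():
    print(group_name, [type(check).__name__ for check in checks])
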
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def cLineGraph(j_file):
data = []
with open(j_file) as f:
for line in f:
data.append(json.loads(line))
data = data[0]
in_other = 0
in_picture = 1
in_text = 2
values = []
time = []
x_coords = []
x_times = []
page_turns = []
pages = []
pic = []
text = []
p = 1
t0 = 0
first = 0
for i in range(0, len(data)):
if data[i].get('type') == 'Picture':
pic = data[i]
if data[i].get('type') == 'Text':
text = data[i]
if first == 0:
page_turns.append(0)
else:
page_turns.append(data[i + 1].get('timestamp') - t0)
pages.append(p)
p = p + 1
if data[i].get('type') == 'SampleGaze' or data[i].get('type'
) == 'SampleFixation':
if first == 0:
t0 = data[i].get('timestamp')
first = 1
time.append(data[i].get('timestamp') - t0)
x = data[i].get('x')
y = data[i].get('y')
if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'
) and y > pic.get('pt'):
values.append(in_picture)
elif x < text.get('tr') and x > text.get('tl') and y < text.get(
'tb') and y > text.get('tt'):
values.append(in_text)
x_coords.append(x)
x_times.append(data[i].get('timestamp') - t0)
else:
values.append(in_other)
d = []
v = values[0]
vs = []
ts = []
vs.append(v)
ts.append(time[0])
for i in range(1, len(values)):
if values[i] == v:
vs.append(v)
ts.append(time[i])
else:
d.append([ts, vs])
vs = []
ts = []
v = values[i]
vs.append(v)
ts.append(time[i])
for i in range(0, len(x_times)):
x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5
for plot in d:
if plot[1][0] == 0:
plt.plot(plot[0], plot[1], 'k', linewidth=10)
elif plot[1][0] == 1:
plt.plot(plot[0], plot[1], 'b', linewidth=10)
elif plot[1][0] == 2:
plt.plot(plot[0], plot[1], 'g', linewidth=10)
plt.axis([0, time[-1], -0.5, 2.5])
plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')
plt.xticks(page_turns, pages, size='small')
plt.xlabel('Page')
plt.ylabel('Eye Location on Page')
plt.savefig('linegraph' + j_file[11:-5] + '.png')
<|reserved_special_token_1|>
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox
from matplotlib.path import Path
import json
def cLineGraph(j_file):
data = []
with open(j_file) as f:
for line in f:
data.append(json.loads(line))
data = data[0]
in_other = 0
in_picture = 1
in_text = 2
values = []
time = []
x_coords = []
x_times = []
page_turns = []
pages = []
pic = []
text = []
p = 1
t0 = 0
first = 0
for i in range(0, len(data)):
if data[i].get('type') == 'Picture':
pic = data[i]
if data[i].get('type') == 'Text':
text = data[i]
if first == 0:
page_turns.append(0)
else:
page_turns.append(data[i + 1].get('timestamp') - t0)
pages.append(p)
p = p + 1
if data[i].get('type') == 'SampleGaze' or data[i].get('type'
) == 'SampleFixation':
if first == 0:
t0 = data[i].get('timestamp')
first = 1
time.append(data[i].get('timestamp') - t0)
x = data[i].get('x')
y = data[i].get('y')
if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'
) and y > pic.get('pt'):
values.append(in_picture)
elif x < text.get('tr') and x > text.get('tl') and y < text.get(
'tb') and y > text.get('tt'):
values.append(in_text)
x_coords.append(x)
x_times.append(data[i].get('timestamp') - t0)
else:
values.append(in_other)
d = []
v = values[0]
vs = []
ts = []
vs.append(v)
ts.append(time[0])
for i in range(1, len(values)):
if values[i] == v:
vs.append(v)
ts.append(time[i])
else:
d.append([ts, vs])
vs = []
ts = []
v = values[i]
vs.append(v)
ts.append(time[i])
for i in range(0, len(x_times)):
x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5
for plot in d:
if plot[1][0] == 0:
plt.plot(plot[0], plot[1], 'k', linewidth=10)
elif plot[1][0] == 1:
plt.plot(plot[0], plot[1], 'b', linewidth=10)
elif plot[1][0] == 2:
plt.plot(plot[0], plot[1], 'g', linewidth=10)
plt.axis([0, time[-1], -0.5, 2.5])
plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')
plt.xticks(page_turns, pages, size='small')
plt.xlabel('Page')
plt.ylabel('Eye Location on Page')
plt.savefig('linegraph' + j_file[11:-5] + '.png')
<|reserved_special_token_1|>
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox
from matplotlib.path import Path
import json
def cLineGraph(j_file):
data = []
with open(j_file) as f:
for line in f:
data.append(json.loads(line))
data = data[0]
in_other = 0
in_picture = 1
in_text = 2
values = []
time = []
x_coords = []
x_times = []
page_turns = []
pages = []
pic = []
text = []
p = 1
t0 = 0
first = 0
for i in range(0, len(data)):
if data[i].get('type') == 'Picture':
pic = data[i]
#print(pic, i)
if data[i].get('type') == 'Text':
text = data[i]
if first == 0:
page_turns.append(0)
else:
page_turns.append(data[i+1].get('timestamp') - t0)
pages.append(p)
p = p + 1
#print(text, i)
if data[i].get('type') == 'SampleGaze' or data[i].get('type') == 'SampleFixation':
#if data[i].get('type') == 'SampleFixation': # comment out line above and use this one for only fixation data
if first == 0:
t0 = data[i].get('timestamp')
first = 1
time.append(data[i].get('timestamp') - t0)
x = data[i].get('x')
y = data[i].get('y')
if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb') and y > pic.get('pt'):
values.append(in_picture)
elif x < text.get('tr') and x > text.get('tl') and y < text.get('tb') and y > text.get('tt'):
values.append(in_text)
x_coords.append(x)
x_times.append(data[i].get('timestamp') - t0)
else:
values.append(in_other)
d = []
v = values[0]
vs = []
ts = []
vs.append(v)
ts.append(time[0])
for i in range(1, len(values)):
if values[i] == v:
vs.append(v)
ts.append(time[i])
else:
d.append([ts, vs])
vs = []
ts = []
v = values[i]
vs.append(v)
ts.append(time[i])
for i in range(0, len(x_times)):
x_coords[i] = ((1/1920.0)*(x_coords[i])) + 1.5
for plot in d:
if plot[1][0] == 0: # other
plt.plot(plot[0], plot[1], 'k', linewidth=10)
elif plot[1][0] == 1: # picture
plt.plot(plot[0], plot[1], 'b', linewidth=10)
        elif plot[1][0] == 2: # text
plt.plot(plot[0], plot[1], 'g', linewidth=10)
# THESE TWO LINES IMPLEMENT THE READING POINT PLOT FUNCTIONALITY
#plt.plot(x_times, x_coords, 'go')
#plt.plot(x_times, x_coords, 'g')
plt.axis([0, time[-1], -0.5, 2.5])
plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')
plt.xticks(page_turns, pages, size='small')
plt.xlabel('Page')
plt.ylabel('Eye Location on Page')
plt.savefig('linegraph' + j_file[11:-5] + '.png')
|
flexible
|
{
"blob_id": "319af5232c043d77a9d63ab1efa62d857da6db23",
"index": 1508,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cLineGraph(j_file):\n data = []\n with open(j_file) as f:\n for line in f:\n data.append(json.loads(line))\n data = data[0]\n in_other = 0\n in_picture = 1\n in_text = 2\n values = []\n time = []\n x_coords = []\n x_times = []\n page_turns = []\n pages = []\n pic = []\n text = []\n p = 1\n t0 = 0\n first = 0\n for i in range(0, len(data)):\n if data[i].get('type') == 'Picture':\n pic = data[i]\n if data[i].get('type') == 'Text':\n text = data[i]\n if first == 0:\n page_turns.append(0)\n else:\n page_turns.append(data[i + 1].get('timestamp') - t0)\n pages.append(p)\n p = p + 1\n if data[i].get('type') == 'SampleGaze' or data[i].get('type'\n ) == 'SampleFixation':\n if first == 0:\n t0 = data[i].get('timestamp')\n first = 1\n time.append(data[i].get('timestamp') - t0)\n x = data[i].get('x')\n y = data[i].get('y')\n if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'\n ) and y > pic.get('pt'):\n values.append(in_picture)\n elif x < text.get('tr') and x > text.get('tl') and y < text.get(\n 'tb') and y > text.get('tt'):\n values.append(in_text)\n x_coords.append(x)\n x_times.append(data[i].get('timestamp') - t0)\n else:\n values.append(in_other)\n d = []\n v = values[0]\n vs = []\n ts = []\n vs.append(v)\n ts.append(time[0])\n for i in range(1, len(values)):\n if values[i] == v:\n vs.append(v)\n ts.append(time[i])\n else:\n d.append([ts, vs])\n vs = []\n ts = []\n v = values[i]\n vs.append(v)\n ts.append(time[i])\n for i in range(0, len(x_times)):\n x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5\n for plot in d:\n if plot[1][0] == 0:\n plt.plot(plot[0], plot[1], 'k', linewidth=10)\n elif plot[1][0] == 1:\n plt.plot(plot[0], plot[1], 'b', linewidth=10)\n elif plot[1][0] == 2:\n plt.plot(plot[0], plot[1], 'g', linewidth=10)\n plt.axis([0, time[-1], -0.5, 2.5])\n plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')\n plt.xticks(page_turns, pages, size='small')\n plt.xlabel('Page')\n plt.ylabel('Eye Location on Page')\n plt.savefig('linegraph' + j_file[11:-5] + '.png')\n",
"step-3": "import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nfrom matplotlib.path import Path\nimport json\n\n\ndef cLineGraph(j_file):\n data = []\n with open(j_file) as f:\n for line in f:\n data.append(json.loads(line))\n data = data[0]\n in_other = 0\n in_picture = 1\n in_text = 2\n values = []\n time = []\n x_coords = []\n x_times = []\n page_turns = []\n pages = []\n pic = []\n text = []\n p = 1\n t0 = 0\n first = 0\n for i in range(0, len(data)):\n if data[i].get('type') == 'Picture':\n pic = data[i]\n if data[i].get('type') == 'Text':\n text = data[i]\n if first == 0:\n page_turns.append(0)\n else:\n page_turns.append(data[i + 1].get('timestamp') - t0)\n pages.append(p)\n p = p + 1\n if data[i].get('type') == 'SampleGaze' or data[i].get('type'\n ) == 'SampleFixation':\n if first == 0:\n t0 = data[i].get('timestamp')\n first = 1\n time.append(data[i].get('timestamp') - t0)\n x = data[i].get('x')\n y = data[i].get('y')\n if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'\n ) and y > pic.get('pt'):\n values.append(in_picture)\n elif x < text.get('tr') and x > text.get('tl') and y < text.get(\n 'tb') and y > text.get('tt'):\n values.append(in_text)\n x_coords.append(x)\n x_times.append(data[i].get('timestamp') - t0)\n else:\n values.append(in_other)\n d = []\n v = values[0]\n vs = []\n ts = []\n vs.append(v)\n ts.append(time[0])\n for i in range(1, len(values)):\n if values[i] == v:\n vs.append(v)\n ts.append(time[i])\n else:\n d.append([ts, vs])\n vs = []\n ts = []\n v = values[i]\n vs.append(v)\n ts.append(time[i])\n for i in range(0, len(x_times)):\n x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5\n for plot in d:\n if plot[1][0] == 0:\n plt.plot(plot[0], plot[1], 'k', linewidth=10)\n elif plot[1][0] == 1:\n plt.plot(plot[0], plot[1], 'b', linewidth=10)\n elif plot[1][0] == 2:\n plt.plot(plot[0], plot[1], 'g', linewidth=10)\n plt.axis([0, time[-1], -0.5, 2.5])\n plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')\n plt.xticks(page_turns, pages, size='small')\n plt.xlabel('Page')\n plt.ylabel('Eye Location on Page')\n plt.savefig('linegraph' + j_file[11:-5] + '.png')\n",
"step-4": "import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nfrom matplotlib.path import Path\nimport json\n\ndef cLineGraph(j_file):\n data = []\n\n with open(j_file) as f:\n for line in f:\n data.append(json.loads(line))\n data = data[0]\n\n in_other = 0\n in_picture = 1\n in_text = 2\n\n values = []\n time = []\n x_coords = []\n x_times = []\n\n page_turns = []\n pages = []\n\n pic = []\n text = []\n p = 1\n t0 = 0\n first = 0\n\n for i in range(0, len(data)):\n if data[i].get('type') == 'Picture':\n pic = data[i]\n #print(pic, i)\n if data[i].get('type') == 'Text':\n text = data[i]\n if first == 0:\n page_turns.append(0)\n else:\n page_turns.append(data[i+1].get('timestamp') - t0)\n pages.append(p)\n p = p + 1\n #print(text, i)\n if data[i].get('type') == 'SampleGaze' or data[i].get('type') == 'SampleFixation':\n #if data[i].get('type') == 'SampleFixation': # comment out line above and use this one for only fixation data\n if first == 0:\n t0 = data[i].get('timestamp')\n first = 1\n time.append(data[i].get('timestamp') - t0)\n x = data[i].get('x')\n y = data[i].get('y')\n if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb') and y > pic.get('pt'):\n values.append(in_picture)\n elif x < text.get('tr') and x > text.get('tl') and y < text.get('tb') and y > text.get('tt'):\n values.append(in_text)\n x_coords.append(x)\n x_times.append(data[i].get('timestamp') - t0)\n else:\n values.append(in_other)\n d = []\n v = values[0]\n vs = []\n ts = []\n vs.append(v)\n ts.append(time[0])\n for i in range(1, len(values)):\n if values[i] == v:\n vs.append(v)\n ts.append(time[i])\n else:\n d.append([ts, vs])\n vs = []\n ts = []\n v = values[i]\n vs.append(v)\n ts.append(time[i])\n for i in range(0, len(x_times)):\n x_coords[i] = ((1/1920.0)*(x_coords[i])) + 1.5\n\n for plot in d:\n if plot[1][0] == 0: # other\n plt.plot(plot[0], plot[1], 'k', linewidth=10)\n elif plot[1][0] == 1: # picture\n plt.plot(plot[0], plot[1], 'b', linewidth=10)\n elif plot[1][0] == 2:\n plt.plot(plot[0], plot[1], 'g', linewidth=10)\n \n # THESE TWO LINES IMPLEMENT THE READING POINT PLOT FUNCTIONALITY \n #plt.plot(x_times, x_coords, 'go')\n #plt.plot(x_times, x_coords, 'g')\n\n plt.axis([0, time[-1], -0.5, 2.5])\n plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')\n plt.xticks(page_turns, pages, size='small')\n plt.xlabel('Page')\n plt.ylabel('Eye Location on Page')\n plt.savefig('linegraph' + j_file[11:-5] + '.png')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
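
A minimal call sketch for the cLineGraph function in the record above; the input path is an assumption. Note that the output file name comes from j_file[11:-5], which strips the first eleven characters of the path and the trailing '.json'.

# Hypothetical input file: one JSON object per line with Picture, Text,
# SampleGaze, and SampleFixation entries, as the parser above expects.
# 'eyetracking' is exactly 11 characters, so j_file[11:-5] -> '_subject01'
# and the figure is saved as 'linegraph_subject01.png'.
cLineGraph('eyetracking_subject01.json')
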
# leetcode 836
# determine if two rectangles overlap
# input is two lists [x1,y1,x2,y2] coordinates
# where x1,y1 are coordinates of bottom left corner
# and x2,y2 are coordinates of top right corner
def overlap_rect(rec1, rec2):
"""Determine if rectangles overlap."""
# true if rec2 is left of rec1
a = rec2[2] <= rec1[0]
# true if rec2 is right of rec1
b = rec1[2] <= rec2[0]
# true if rec2 is below rec1
c = rec2[3] <= rec1[1]
# true if rec2 is above rec1
d = rec1[3] <= rec2[1]
return not (a or b or c or d)
|
normal
|
{
"blob_id": "0ef03ed455938bd2001581986c38104bfac395ce",
"index": 8078,
"step-1": "<mask token>\n",
"step-2": "def overlap_rect(rec1, rec2):\n \"\"\"Determine if rectangles overlap.\"\"\"\n a = rec2[2] <= rec1[0]\n b = rec1[2] <= rec2[0]\n c = rec2[3] <= rec1[1]\n d = rec1[3] <= rec2[1]\n return not (a or b or c or d)\n",
"step-3": "# leetcode 836\n# determine if two rectangles overlap\n# input is two lists [x1,y1,x2,y2] coordinates\n# where x1,y1 are coordinates of bottom left corner\n# and x2,y2 are coordinates of top right corner\n\ndef overlap_rect(rec1, rec2):\n \"\"\"Determine if rectangles overlap.\"\"\"\n # true if rec2 is left of rec1\n a = rec2[2] <= rec1[0]\n \n # true if rec2 is right of rec1\n b = rec1[2] <= rec2[0]\n\n # true if rec2 is below rec1\n c = rec2[3] <= rec1[1]\n\n # true if rec2 is above rec1\n d = rec1[3] <= rec2[1]\n\n return not (a or b or c or d)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
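
A few sanity checks for overlap_rect above, with illustrative coordinates: two axis-aligned rectangles overlap exactly when neither lies entirely to one side of the other, and because the comparisons use <=, edge contact alone does not count as overlap.

# Illustrative test values, not taken from the original problem statement.
print(overlap_rect([0, 0, 2, 2], [1, 1, 3, 3]))  # True: shared square (1,1)-(2,2)
print(overlap_rect([0, 0, 1, 1], [2, 2, 3, 3]))  # False: rec2 fully right of and above rec1
print(overlap_rect([0, 0, 1, 1], [1, 0, 2, 1]))  # False: rec1[2] == rec2[0], edge contact only
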