code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
'''
Created on 18/10/2012
@author: matthias
'''
import os
import errno
import uuid
import glob
import shutil
import sys
import subprocess
import time
import pickle
import common.pbs
def prepare_directories(options, extension, subversiondir=None):
# extract datadir from options
datadir = options['datadir']
print("Creating directory {0:s}.".format(datadir+extension))
# recursively create directory
try:
os.makedirs(datadir+extension)
except OSError as err:
# check if the error is because the dir exists
if (err.errno == errno.EEXIST):
print("Directory \"{0:s}\" exists. Moving contents to backup dir.".format(datadir+extension))
# in that case we just create a unique directory and copy all the old stuff there
olddir = datadir+'/'+str(uuid.uuid4())+'.backup/'
os.makedirs(olddir)
# need to expand wildcards first
for file in glob.glob(datadir+extension+'*'):
shutil.move(file, olddir)
else:
print("Error \"{0:s}\" while creating directory.".format(err.strerror))
sys.exit(1)
# change into dir
os.chdir(datadir+extension)
# put in subversion information (if requested)
if subversiondir:
# open info file
infofile = open("svninfo", 'w')
# run svn info in subversiondir
subprocess.call(["svn", "info", subversiondir], stdout=infofile)
# close infofile
infofile.close()
# and return a path to the full dir
return datadir+extension
def runNode(options, executable, datadir, parameters):
"""Runs a job on the current machine."""
command = executable + ' ' + parameters;
print("Command is {0:s}".format(command))
#change to full directory
os.chdir(datadir)
# run only if it's not set to fake mode
if not options['fakeRun']:
# create files to capture output
outfile = open(options['outlog'], 'w')
errfile = open(options['errlog'], 'w')
# and time it
ts = time.time()
subprocess.call(command, stdout=outfile, stderr=errfile, shell=True)
t = time.time()-ts
# close log files
outfile.close()
errfile.close()
else:
print("Running (fakemode): {0:s} in directory {1:s}.".format(command, datadir))
t = 1.
# write timing information
timingfile = open(options['timingFile'], 'w')
pickle.dump(t, timingfile)
timingfile.close()
def run(options, executable, datadir, parameters):
"""Runs a job specified by executable. The name of the executable must contain the complete path.
If options['pbs'] is set to True, the jobs will be submitted to the PBS scheduler."""
if options['pbs']:
common.pbs.run(options, executable, datadir, parameters)
else:
runNode(options, executable, datadir, parameters)
|
normal
|
{
"blob_id": "6aeaa2ed01e0c0dac54cd8220c5da005fccc53e9",
"index": 2609,
"step-1": "<mask token>\n\n\ndef prepare_directories(options, extension, subversiondir=None):\n datadir = options['datadir']\n print('Creating directory {0:s}.'.format(datadir + extension))\n try:\n os.makedirs(datadir + extension)\n except OSError as err:\n if err.errno == errno.EEXIST:\n print('Directory \"{0:s}\" exists. Moving contents to backup dir.'\n .format(datadir + extension))\n olddir = datadir + '/' + str(uuid.uuid4()) + '.backup/'\n os.makedirs(olddir)\n for file in glob.glob(datadir + extension + '*'):\n shutil.move(file, olddir)\n else:\n print('Error \"{0:s}\" while creating directory.'.format(err.\n strerror))\n sys.exit(1)\n os.chdir(datadir + extension)\n if subversiondir:\n infofile = open('svninfo', 'w')\n subprocess.call(['svn', 'info', subversiondir], stdout=infofile)\n infofile.close()\n return datadir + extension\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef prepare_directories(options, extension, subversiondir=None):\n datadir = options['datadir']\n print('Creating directory {0:s}.'.format(datadir + extension))\n try:\n os.makedirs(datadir + extension)\n except OSError as err:\n if err.errno == errno.EEXIST:\n print('Directory \"{0:s}\" exists. Moving contents to backup dir.'\n .format(datadir + extension))\n olddir = datadir + '/' + str(uuid.uuid4()) + '.backup/'\n os.makedirs(olddir)\n for file in glob.glob(datadir + extension + '*'):\n shutil.move(file, olddir)\n else:\n print('Error \"{0:s}\" while creating directory.'.format(err.\n strerror))\n sys.exit(1)\n os.chdir(datadir + extension)\n if subversiondir:\n infofile = open('svninfo', 'w')\n subprocess.call(['svn', 'info', subversiondir], stdout=infofile)\n infofile.close()\n return datadir + extension\n\n\n<mask token>\n\n\ndef run(options, executable, datadir, parameters):\n \"\"\"Runs a job specified by executable. The name of the executable must contain the complete path.\n If options['pbs'] is set to True, the jobs will be submitted to the PBS scheduler.\"\"\"\n if options['pbs']:\n common.pbs.run(options, executable, datadir, parameters)\n else:\n runNode(options, executable, datadir, parameters)\n",
"step-3": "<mask token>\n\n\ndef prepare_directories(options, extension, subversiondir=None):\n datadir = options['datadir']\n print('Creating directory {0:s}.'.format(datadir + extension))\n try:\n os.makedirs(datadir + extension)\n except OSError as err:\n if err.errno == errno.EEXIST:\n print('Directory \"{0:s}\" exists. Moving contents to backup dir.'\n .format(datadir + extension))\n olddir = datadir + '/' + str(uuid.uuid4()) + '.backup/'\n os.makedirs(olddir)\n for file in glob.glob(datadir + extension + '*'):\n shutil.move(file, olddir)\n else:\n print('Error \"{0:s}\" while creating directory.'.format(err.\n strerror))\n sys.exit(1)\n os.chdir(datadir + extension)\n if subversiondir:\n infofile = open('svninfo', 'w')\n subprocess.call(['svn', 'info', subversiondir], stdout=infofile)\n infofile.close()\n return datadir + extension\n\n\ndef runNode(options, executable, datadir, parameters):\n \"\"\"Runs a job on the current machine.\"\"\"\n command = executable + ' ' + parameters\n print('Command is {0:s}'.format(command))\n os.chdir(datadir)\n if not options['fakeRun']:\n outfile = open(options['outlog'], 'w')\n errfile = open(options['errlog'], 'w')\n ts = time.time()\n subprocess.call(command, stdout=outfile, stderr=errfile, shell=True)\n t = time.time() - ts\n outfile.close()\n errfile.close()\n else:\n print('Running (fakemode): {0:s} in directory {1:s}.'.format(\n command, datadir))\n t = 1.0\n timingfile = open(options['timingFile'], 'w')\n pickle.dump(t, timingfile)\n timingfile.close()\n\n\ndef run(options, executable, datadir, parameters):\n \"\"\"Runs a job specified by executable. The name of the executable must contain the complete path.\n If options['pbs'] is set to True, the jobs will be submitted to the PBS scheduler.\"\"\"\n if options['pbs']:\n common.pbs.run(options, executable, datadir, parameters)\n else:\n runNode(options, executable, datadir, parameters)\n",
"step-4": "<mask token>\nimport os\nimport errno\nimport uuid\nimport glob\nimport shutil\nimport sys\nimport subprocess\nimport time\nimport pickle\nimport common.pbs\n\n\ndef prepare_directories(options, extension, subversiondir=None):\n datadir = options['datadir']\n print('Creating directory {0:s}.'.format(datadir + extension))\n try:\n os.makedirs(datadir + extension)\n except OSError as err:\n if err.errno == errno.EEXIST:\n print('Directory \"{0:s}\" exists. Moving contents to backup dir.'\n .format(datadir + extension))\n olddir = datadir + '/' + str(uuid.uuid4()) + '.backup/'\n os.makedirs(olddir)\n for file in glob.glob(datadir + extension + '*'):\n shutil.move(file, olddir)\n else:\n print('Error \"{0:s}\" while creating directory.'.format(err.\n strerror))\n sys.exit(1)\n os.chdir(datadir + extension)\n if subversiondir:\n infofile = open('svninfo', 'w')\n subprocess.call(['svn', 'info', subversiondir], stdout=infofile)\n infofile.close()\n return datadir + extension\n\n\ndef runNode(options, executable, datadir, parameters):\n \"\"\"Runs a job on the current machine.\"\"\"\n command = executable + ' ' + parameters\n print('Command is {0:s}'.format(command))\n os.chdir(datadir)\n if not options['fakeRun']:\n outfile = open(options['outlog'], 'w')\n errfile = open(options['errlog'], 'w')\n ts = time.time()\n subprocess.call(command, stdout=outfile, stderr=errfile, shell=True)\n t = time.time() - ts\n outfile.close()\n errfile.close()\n else:\n print('Running (fakemode): {0:s} in directory {1:s}.'.format(\n command, datadir))\n t = 1.0\n timingfile = open(options['timingFile'], 'w')\n pickle.dump(t, timingfile)\n timingfile.close()\n\n\ndef run(options, executable, datadir, parameters):\n \"\"\"Runs a job specified by executable. 
The name of the executable must contain the complete path.\n If options['pbs'] is set to True, the jobs will be submitted to the PBS scheduler.\"\"\"\n if options['pbs']:\n common.pbs.run(options, executable, datadir, parameters)\n else:\n runNode(options, executable, datadir, parameters)\n",
"step-5": "'''\nCreated on 18/10/2012\n\n@author: matthias\n'''\nimport os\nimport errno\nimport uuid\nimport glob\nimport shutil\nimport sys\nimport subprocess\nimport time\nimport pickle\nimport common.pbs\n\ndef prepare_directories(options, extension, subversiondir=None):\n # extract datadir from options\n datadir = options['datadir']\n \n print(\"Creating directory {0:s}.\".format(datadir+extension))\n\n # recursively create directory\n try:\n os.makedirs(datadir+extension)\n except OSError as err:\n # check if the error is because the dir exists\n if (err.errno == errno.EEXIST):\n print(\"Directory \\\"{0:s}\\\" exists. Moving contents to backup dir.\".format(datadir+extension))\n\n # in that case we just create a unique directory and copy all the old stuff there\n olddir = datadir+'/'+str(uuid.uuid4())+'.backup/'\n os.makedirs(olddir)\n # need to expand wildcards first\n for file in glob.glob(datadir+extension+'*'):\n shutil.move(file, olddir)\n else:\n print(\"Error \\\"{0:s}\\\" while creating directory.\".format(err.strerror))\n sys.exit(1)\n \n # change into dir\n os.chdir(datadir+extension)\n\n # put in subversion information (if requested)\n if subversiondir:\n # open info file\n infofile = open(\"svninfo\", 'w')\n \n # run svn info in subversiondir\n subprocess.call([\"svn\", \"info\", subversiondir], stdout=infofile)\n \n # close infofile\n infofile.close()\n \n # and return a path to the full dir\n return datadir+extension\n\ndef runNode(options, executable, datadir, parameters):\n \"\"\"Runs a job on the current machine.\"\"\"\n command = executable + ' ' + parameters;\n\n print(\"Command is {0:s}\".format(command))\n\n #change to full directory\n os.chdir(datadir)\n \n # run only if it's not set to fake mode\n if not options['fakeRun']:\n # create files to capture output\n outfile = open(options['outlog'], 'w')\n errfile = open(options['errlog'], 'w')\n \n # and time it\n ts = time.time()\n subprocess.call(command, stdout=outfile, stderr=errfile, 
shell=True)\n t = time.time()-ts\n \n # close log files\n outfile.close()\n errfile.close()\n else:\n print(\"Running (fakemode): {0:s} in directory {1:s}.\".format(command, datadir))\n t = 1.\n \n # write timing information \n timingfile = open(options['timingFile'], 'w')\n pickle.dump(t, timingfile)\n timingfile.close()\n \ndef run(options, executable, datadir, parameters):\n \"\"\"Runs a job specified by executable. The name of the executable must contain the complete path.\n If options['pbs'] is set to True, the jobs will be submitted to the PBS scheduler.\"\"\"\n if options['pbs']:\n common.pbs.run(options, executable, datadir, parameters)\n else:\n runNode(options, executable, datadir, parameters)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Tokenizer:
def __init__(self, buf):
self.buf = buf
self.index = 0
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def skip_whitespaces(self):
while self.index < len(self.buf) and self.token().isspace():
self.move(1)
def next(self):
self.skip_whitespaces()
if self.index < len(self.buf):
if self.token() == '+':
self.move(1)
return Symbol('+')
elif self.token() == '-':
self.move(1)
return Symbol('-')
elif self.token() == '*':
self.move(1)
return Symbol('*')
elif self.token() == '/':
self.move(1)
return Symbol('/')
elif self.token() == '(':
self.move(1)
return OpenParen()
elif self.token() == ')':
self.move(1)
return CloseParen()
elif self.token().isnumeric():
number = int(self.token())
self.move(1)
while self.index < len(self.buf) and self.token().isnumeric():
number = number * 10 + int(self.token())
self.move(1)
return Number(number)
else:
char = self.token()
self.move(1)
return Undefined(char)
else:
return Eof()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Tokenizer:
def __init__(self, buf):
self.buf = buf
self.index = 0
<|reserved_special_token_0|>
def move(self, value):
self.index += value
def skip_whitespaces(self):
while self.index < len(self.buf) and self.token().isspace():
self.move(1)
def next(self):
self.skip_whitespaces()
if self.index < len(self.buf):
if self.token() == '+':
self.move(1)
return Symbol('+')
elif self.token() == '-':
self.move(1)
return Symbol('-')
elif self.token() == '*':
self.move(1)
return Symbol('*')
elif self.token() == '/':
self.move(1)
return Symbol('/')
elif self.token() == '(':
self.move(1)
return OpenParen()
elif self.token() == ')':
self.move(1)
return CloseParen()
elif self.token().isnumeric():
number = int(self.token())
self.move(1)
while self.index < len(self.buf) and self.token().isnumeric():
number = number * 10 + int(self.token())
self.move(1)
return Number(number)
else:
char = self.token()
self.move(1)
return Undefined(char)
else:
return Eof()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Tokenizer:
def __init__(self, buf):
self.buf = buf
self.index = 0
def token(self):
return self.buf[self.index]
def move(self, value):
self.index += value
def skip_whitespaces(self):
while self.index < len(self.buf) and self.token().isspace():
self.move(1)
def next(self):
self.skip_whitespaces()
if self.index < len(self.buf):
if self.token() == '+':
self.move(1)
return Symbol('+')
elif self.token() == '-':
self.move(1)
return Symbol('-')
elif self.token() == '*':
self.move(1)
return Symbol('*')
elif self.token() == '/':
self.move(1)
return Symbol('/')
elif self.token() == '(':
self.move(1)
return OpenParen()
elif self.token() == ')':
self.move(1)
return CloseParen()
elif self.token().isnumeric():
number = int(self.token())
self.move(1)
while self.index < len(self.buf) and self.token().isnumeric():
number = number * 10 + int(self.token())
self.move(1)
return Number(number)
else:
char = self.token()
self.move(1)
return Undefined(char)
else:
return Eof()
<|reserved_special_token_1|>
from types import *
class Tokenizer:
def __init__(self, buf):
self.buf = buf
self.index = 0
def token(self):
return self.buf[self.index]
def move(self, value):
self.index += value
def skip_whitespaces(self):
while self.index < len(self.buf) and self.token().isspace():
self.move(1)
def next(self):
self.skip_whitespaces()
if self.index < len(self.buf):
if self.token() == '+':
self.move(1)
return Symbol('+')
elif self.token() == '-':
self.move(1)
return Symbol('-')
elif self.token() == '*':
self.move(1)
return Symbol('*')
elif self.token() == '/':
self.move(1)
return Symbol('/')
elif self.token() == '(':
self.move(1)
return OpenParen()
elif self.token() == ')':
self.move(1)
return CloseParen()
elif self.token().isnumeric():
number = int(self.token())
self.move(1)
while self.index < len(self.buf) and self.token().isnumeric():
number = number * 10 + int(self.token())
self.move(1)
return Number(number)
else:
char = self.token()
self.move(1)
return Undefined(char)
else:
return Eof()
<|reserved_special_token_1|>
from types import *
class Tokenizer:
def __init__(self, buf):
self.buf = buf
self.index = 0
def token(self):
return self.buf[self.index]
def move(self, value):
self.index += value
def skip_whitespaces(self):
while self.index < len(self.buf) and self.token().isspace():
self.move(1)
def next(self):
self.skip_whitespaces()
if self.index < len(self.buf):
if self.token() == '+':
self.move(1)
return Symbol('+')
elif self.token() == '-':
self.move(1)
return Symbol('-')
elif self.token() == '*':
self.move(1)
return Symbol('*')
elif self.token() == '/':
self.move(1)
return Symbol('/')
elif self.token() == '(':
self.move(1)
return OpenParen()
elif self.token() == ')':
self.move(1)
return CloseParen()
else:
if self.token().isnumeric():
number = int(self.token())
self.move(1)
while self.index < len(self.buf) and self.token().isnumeric():
number = number * 10 + int(self.token())
self.move(1)
return Number(number)
else:
char = self.token()
self.move(1)
return Undefined(char)
else:
return Eof()
|
flexible
|
{
"blob_id": "282bccf20cfb114e31c5465c110819796bf81bc0",
"index": 9318,
"step-1": "<mask token>\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n <mask token>\n <mask token>\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-2": "<mask token>\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n <mask token>\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-3": "<mask token>\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n\n def token(self):\n return self.buf[self.index]\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-4": "from types import *\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n\n def token(self):\n return self.buf[self.index]\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-5": "from types import *\n\nclass Tokenizer:\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n\n def token(self):\n return self.buf[self.index]\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n else:\n if self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
{'name': 'Islamic Datepicker', 'category': 'Extra Tools', 'author':
'Mostafa Mohamed', 'website':
'https://eg.linkedin.com/in/mostafa-mohammed-449a8786', 'price': 25.0,
'currency': 'EUR', 'version': '9.0.1.0.1', 'depends': ['base', 'web'],
'data': ['views/islamic_template.xml'], 'qweb': [
'static/src/xml/islamice_date_widget.xml'], 'auto_install': False,
'installable': True}
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
{
'name': 'Islamic Datepicker',
'category': 'Extra Tools',
'author': 'Mostafa Mohamed',
'website': 'https://eg.linkedin.com/in/mostafa-mohammed-449a8786',
'price': 25.00,
'currency': 'EUR',
'version': '9.0.1.0.1',
'depends': ['base','web'],
'data': [
'views/islamic_template.xml',
],
'qweb': [
"static/src/xml/islamice_date_widget.xml",
],
'auto_install': False,
'installable': True
}
|
flexible
|
{
"blob_id": "51a4d8f1be7009b69f0b69bdd51a0077256304a9",
"index": 7222,
"step-1": "<mask token>\n",
"step-2": "{'name': 'Islamic Datepicker', 'category': 'Extra Tools', 'author':\n 'Mostafa Mohamed', 'website':\n 'https://eg.linkedin.com/in/mostafa-mohammed-449a8786', 'price': 25.0,\n 'currency': 'EUR', 'version': '9.0.1.0.1', 'depends': ['base', 'web'],\n 'data': ['views/islamic_template.xml'], 'qweb': [\n 'static/src/xml/islamice_date_widget.xml'], 'auto_install': False,\n 'installable': True}\n",
"step-3": "# -*- coding: utf-8 -*-\n{\n 'name': 'Islamic Datepicker',\n 'category': 'Extra Tools',\n 'author': 'Mostafa Mohamed',\n 'website': 'https://eg.linkedin.com/in/mostafa-mohammed-449a8786',\n 'price': 25.00,\n 'currency': 'EUR',\n 'version': '9.0.1.0.1',\n 'depends': ['base','web'],\n 'data': [\n 'views/islamic_template.xml',\n ],\n 'qweb': [\n \"static/src/xml/islamice_date_widget.xml\",\n ],\n 'auto_install': False,\n 'installable': True\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s.bind(('', 8082))
s.listen(1)
<|reserved_special_token_0|>
os.dup2(conn.fileno(), 0)
os.dup2(conn.fileno(), 1)
os.system('/bin/bash')
conn.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 8082))
s.listen(1)
conn, __ = s.accept()
os.dup2(conn.fileno(), 0)
os.dup2(conn.fileno(), 1)
os.system('/bin/bash')
conn.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import socket
import os
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 8082))
s.listen(1)
conn, __ = s.accept()
os.dup2(conn.fileno(), 0)
os.dup2(conn.fileno(), 1)
os.system('/bin/bash')
conn.close()
<|reserved_special_token_1|>
'''
quick and dirty remote shell using sockets and file descriptors
'''
import socket
import os
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind(('',8082))
s.listen(1)
conn,__=s.accept()
os.dup2(conn.fileno(),0)
os.dup2(conn.fileno(),1)
#print("asdf")
os.system('/bin/bash')
conn.close()
|
flexible
|
{
"blob_id": "38a2113c0531648a90cf70c4b18d640d5ebb3f47",
"index": 5637,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.bind(('', 8082))\ns.listen(1)\n<mask token>\nos.dup2(conn.fileno(), 0)\nos.dup2(conn.fileno(), 1)\nos.system('/bin/bash')\nconn.close()\n",
"step-3": "<mask token>\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('', 8082))\ns.listen(1)\nconn, __ = s.accept()\nos.dup2(conn.fileno(), 0)\nos.dup2(conn.fileno(), 1)\nos.system('/bin/bash')\nconn.close()\n",
"step-4": "<mask token>\nimport socket\nimport os\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('', 8082))\ns.listen(1)\nconn, __ = s.accept()\nos.dup2(conn.fileno(), 0)\nos.dup2(conn.fileno(), 1)\nos.system('/bin/bash')\nconn.close()\n",
"step-5": "'''\nquick and dirty remote shell using sockets and file descriptors\n'''\nimport socket\nimport os\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind(('',8082))\n\ns.listen(1)\n\nconn,__=s.accept()\n\nos.dup2(conn.fileno(),0)\nos.dup2(conn.fileno(),1)\n\n#print(\"asdf\")\nos.system('/bin/bash')\n\t\nconn.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def binary_add(x, y):
""" Adds two binary arrays together. """
assert len(x) == len(y)
z = [0] * (len(x) + 1)
for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):
if i not in [0, 1]:
return False
if j not in [0, 1]:
return False
if i and j:
z[a] += 0
z[a + 1] += 1
elif i or j:
z[a] += 1
else:
pass
if z[a] == 2:
z[a + 1] += 1
z[a] -= 2
return z[::-1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def binary_add(x, y):
""" Adds two binary arrays together. """
assert len(x) == len(y)
z = [0] * (len(x) + 1)
for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):
if i not in [0, 1]:
return False
if j not in [0, 1]:
return False
if i and j:
z[a] += 0
z[a + 1] += 1
elif i or j:
z[a] += 1
else:
pass
if z[a] == 2:
z[a + 1] += 1
z[a] -= 2
return z[::-1]
def unit_test():
""" Unit tests. """
x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]
y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]
z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [
1, 0, 0, 0, 0, 0]
for a, (x, y) in enumerate(zip(x_arr, y_arr)):
sum = binary_add(x, y)
print('Adding {} to {}.'.format(x, y))
if sum == z_arr[a]:
print('Successfully returned {}.'.format(sum))
else:
print('Got {} instead of {}.'.format(sum, z_arr[a]))
print()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def binary_add(x, y):
""" Adds two binary arrays together. """
assert len(x) == len(y)
z = [0] * (len(x) + 1)
for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):
if i not in [0, 1]:
return False
if j not in [0, 1]:
return False
if i and j:
z[a] += 0
z[a + 1] += 1
elif i or j:
z[a] += 1
else:
pass
if z[a] == 2:
z[a + 1] += 1
z[a] -= 2
return z[::-1]
def unit_test():
""" Unit tests. """
x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]
y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]
z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [
1, 0, 0, 0, 0, 0]
for a, (x, y) in enumerate(zip(x_arr, y_arr)):
sum = binary_add(x, y)
print('Adding {} to {}.'.format(x, y))
if sum == z_arr[a]:
print('Successfully returned {}.'.format(sum))
else:
print('Got {} instead of {}.'.format(sum, z_arr[a]))
print()
if __name__ == '__main__':
unit_test()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'David Vaillant'
__credits__ = 'CLRS, Chapter 2.1'
def binary_add(x, y):
""" Adds two binary arrays together. """
assert len(x) == len(y)
z = [0] * (len(x) + 1)
for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):
if i not in [0, 1]:
return False
if j not in [0, 1]:
return False
if i and j:
z[a] += 0
z[a + 1] += 1
elif i or j:
z[a] += 1
else:
pass
if z[a] == 2:
z[a + 1] += 1
z[a] -= 2
return z[::-1]
def unit_test():
""" Unit tests. """
x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]
y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]
z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [
1, 0, 0, 0, 0, 0]
for a, (x, y) in enumerate(zip(x_arr, y_arr)):
sum = binary_add(x, y)
print('Adding {} to {}.'.format(x, y))
if sum == z_arr[a]:
print('Successfully returned {}.'.format(sum))
else:
print('Got {} instead of {}.'.format(sum, z_arr[a]))
print()
if __name__ == '__main__':
unit_test()
<|reserved_special_token_1|>
""" binary_adder.py: Takes two arrays representing binary numbers,
adds them together. """
__author__ = "David Vaillant"
__credits__ = "CLRS, Chapter 2.1"
def binary_add(x, y):
""" Adds two binary arrays together. """
# Makes sure that the arrays have the same length.
# Could be changed to padding on extra zeroes, if so desired.
assert(len(x) == len(y))
z = [0] * (len(x)+1)
for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):
# Makes sure that the array is a binary array.
# Strictly speaking, not necessary. But nice.
if i not in [0, 1]: return False
if j not in [0, 1]: return False
# if i and j are both 1
if i and j:
z[a] += 0
z[a+1] += 1
# if only one of them is 1
elif i or j:
z[a] += 1
# if they're both 0
else: pass
if z[a] == 2:
z[a+1] += 1
z[a] -= 2
return z[::-1]
def unit_test():
    """ Unit tests. """
    # Parallel fixtures: x_arr[k] + y_arr[k] is expected to equal z_arr[k].
    x_arr = ( [1, 0, 0],
              [1],
              [0],
              [1, 0, 0, 1],
              [1, 1, 1, 1],
              [1, 0, 0, 0, 0])
    y_arr = ( [0, 1, 1],
              [0],
              [0, 0],
              [1, 1, 0, 0],
              [0, 0, 0, 0],
              [1, 0, 0, 0, 0])
    # NOTE(review): the third expectation is None for mismatched-length
    # inputs ([0] vs [0, 0]), but binary_add asserts equal lengths and will
    # raise AssertionError there, aborting the remaining cases — confirm
    # which behavior is intended.
    z_arr = ( [0, 1, 1, 1],
              [0, 1],
              None,
              [1, 0, 1, 0, 1],
              [0, 1, 1, 1, 1],
              [1, 0, 0, 0, 0, 0] )
    for a, (x, y) in enumerate(zip(x_arr, y_arr)):
        # NOTE(review): 'sum' shadows the builtin of the same name.
        sum = binary_add(x, y)
        print("Adding {} to {}.".format(x, y))
        if sum == z_arr[a]:
            print("Successfully returned {}.".format(sum))
        else:
            print("Got {} instead of {}.".format(sum, z_arr[a]))
        print()
if __name__ == "__main__":
    unit_test()
|
flexible
|
{
"blob_id": "40aa9e7cf0aaca24054297ca80aaf468ba485966",
"index": 5621,
"step-1": "<mask token>\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]\n y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]\n z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [\n 1, 0, 0, 0, 0, 0]\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print('Adding {} to {}.'.format(x, y))\n if sum == z_arr[a]:\n print('Successfully returned {}.'.format(sum))\n else:\n print('Got {} instead of {}.'.format(sum, z_arr[a]))\n print()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]\n y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]\n z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [\n 1, 0, 0, 0, 0, 0]\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print('Adding {} to {}.'.format(x, y))\n if sum == z_arr[a]:\n print('Successfully returned {}.'.format(sum))\n else:\n print('Got {} instead of {}.'.format(sum, z_arr[a]))\n print()\n\n\nif __name__ == '__main__':\n unit_test()\n",
"step-4": "<mask token>\n__author__ = 'David Vaillant'\n__credits__ = 'CLRS, Chapter 2.1'\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]\n y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]\n z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [\n 1, 0, 0, 0, 0, 0]\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print('Adding {} to {}.'.format(x, y))\n if sum == z_arr[a]:\n print('Successfully returned {}.'.format(sum))\n else:\n print('Got {} instead of {}.'.format(sum, z_arr[a]))\n print()\n\n\nif __name__ == '__main__':\n unit_test()\n",
"step-5": "\"\"\" binary_adder.py: Takes two arrays representing binary numbers,\n adds them together. \"\"\"\n\n__author__ = \"David Vaillant\"\n__credits__ = \"CLRS, Chapter 2.1\"\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n # Makes sure that the arrays have the same length.\n # Could be changed to padding on extra zeroes, if so desired.\n assert(len(x) == len(y))\n\n z = [0] * (len(x)+1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n # Makes sure that the array is a binary array.\n # Strictly speaking, not necessary. But nice.\n if i not in [0, 1]: return False\n if j not in [0, 1]: return False\n\n # if i and j are both 1 \n if i and j:\n z[a] += 0\n z[a+1] += 1\n # if only one of them is 1\n elif i or j:\n z[a] += 1\n # if they're both 0\n else: pass\n\n if z[a] == 2:\n z[a+1] += 1\n z[a] -= 2\n \n return z[::-1]\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = ( [1, 0, 0],\n [1],\n [0],\n [1, 0, 0, 1],\n [1, 1, 1, 1],\n [1, 0, 0, 0, 0])\n y_arr = ( [0, 1, 1],\n [0],\n [0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [1, 0, 0, 0, 0])\n z_arr = ( [0, 1, 1, 1],\n [0, 1],\n None,\n [1, 0, 1, 0, 1],\n [0, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0] )\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print(\"Adding {} to {}.\".format(x, y))\n if sum == z_arr[a]:\n print(\"Successfully returned {}.\".format(sum))\n else:\n print(\"Got {} instead of {}.\".format(sum, z_arr[a]))\n print()\n\nif __name__ == \"__main__\":\n unit_test()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('-t', '--testing', action='store_true')
parser.add_argument('-i', '--init', action='store_true')
parser.add_argument('-r', '--reinit', action='store_true')
<|reserved_special_token_0|>
try:
sys.argv.remove('-t')
except:
pass
try:
sys.argv.remove('--testing')
except:
pass
try:
sys.argv.remove('-i')
except:
pass
try:
sys.argv.remove('--init')
except:
pass
try:
sys.argv.remove('-r')
except:
pass
try:
sys.argv.remove('--reinit')
except:
pass
<|reserved_special_token_0|>
CORS(app)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--testing', action='store_true')
parser.add_argument('-i', '--init', action='store_true')
parser.add_argument('-r', '--reinit', action='store_true')
args = parser.parse_known_args()
<|reserved_special_token_0|>
try:
sys.argv.remove('-t')
except:
pass
try:
sys.argv.remove('--testing')
except:
pass
try:
sys.argv.remove('-i')
except:
pass
try:
sys.argv.remove('--init')
except:
pass
try:
sys.argv.remove('-r')
except:
pass
try:
sys.argv.remove('--reinit')
except:
pass
app = Flask(__name__)
app.config['TOKEN_SECRET'] = 'Secret_Token'
app.config['SECRET_KEY'] = 'Secret_Key'
app.config['CORS_HEADERS'] = ['Content-Type', 'Authorization']
app.config['CORS_AUTOMATIC_OPTIONS'] = True
CORS(app)
app.config['TESTING'] = args[0].testing
app.config['INIT'] = args[0].init
app.config['REINIT'] = args[0].reinit
<|reserved_special_token_0|>
dbManager = DatabaseManager()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from flask import Flask
from flask_cors import CORS
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--testing', action='store_true')
parser.add_argument('-i', '--init', action='store_true')
parser.add_argument('-r', '--reinit', action='store_true')
args = parser.parse_known_args()
import sys
try:
sys.argv.remove('-t')
except:
pass
try:
sys.argv.remove('--testing')
except:
pass
try:
sys.argv.remove('-i')
except:
pass
try:
sys.argv.remove('--init')
except:
pass
try:
sys.argv.remove('-r')
except:
pass
try:
sys.argv.remove('--reinit')
except:
pass
app = Flask(__name__)
app.config['TOKEN_SECRET'] = 'Secret_Token'
app.config['SECRET_KEY'] = 'Secret_Key'
app.config['CORS_HEADERS'] = ['Content-Type', 'Authorization']
app.config['CORS_AUTOMATIC_OPTIONS'] = True
CORS(app)
app.config['TESTING'] = args[0].testing
app.config['INIT'] = args[0].init
app.config['REINIT'] = args[0].reinit
from SmartRecruiting_BackEnd.data import DatabaseManager
dbManager = DatabaseManager()
import SmartRecruiting_BackEnd.api.routes
import SmartRecruiting_BackEnd.data
import SmartRecruiting_BackEnd.deeplearning.preprocess
<|reserved_special_token_1|>
# encoding: utf-8
# -*- coding: utf-8 -*-
"""
The flask application package.

Parses the command-line flags (-t/--testing, -i/--init, -r/--reinit),
strips them from sys.argv so they cannot confuse unittest, then builds
and configures the Flask application object.
"""
from flask import Flask
from flask_cors import CORS

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-t', '--testing', action='store_true')  # use the testing database
parser.add_argument('-i', '--init', action='store_true')  # initialise the database
parser.add_argument('-r', '--reinit', action='store_true')  # re-initialise the database
# parse_known_args() tolerates extra arguments meant for other tools.
args = parser.parse_known_args()

# Remove our own flags so they do not interfere with unittest's argv parsing.
import sys
for _flag in ('-t', '--testing', '-i', '--init', '-r', '--reinit'):
    try:
        sys.argv.remove(_flag)
    except ValueError:
        # The flag was simply not passed on the command line.
        # (list.remove only raises ValueError, so a narrow except suffices;
        # the previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt.)
        pass

app = Flask(__name__)
app.config['TOKEN_SECRET'] = 'Secret_Token'  # TODO: change this before deployment
app.config['SECRET_KEY'] = 'Secret_Key'  # TODO: change this before deployment
app.config['CORS_HEADERS'] = ['Content-Type', 'Authorization']
app.config['CORS_AUTOMATIC_OPTIONS'] = True
CORS(app)

app.config['TESTING'] = args[0].testing
app.config['INIT'] = args[0].init
app.config['REINIT'] = args[0].reinit

from SmartRecruiting_BackEnd.data import DatabaseManager
dbManager = DatabaseManager()

# Imported last, for their side effects (route registration etc.), to avoid
# circular imports with the `app` object defined above.
import SmartRecruiting_BackEnd.api.routes
import SmartRecruiting_BackEnd.data
import SmartRecruiting_BackEnd.deeplearning.preprocess
|
flexible
|
{
"blob_id": "e403a84ec2a3104cb908933f6949458cccc791c3",
"index": 4737,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-t', '--testing', action='store_true')\nparser.add_argument('-i', '--init', action='store_true')\nparser.add_argument('-r', '--reinit', action='store_true')\n<mask token>\ntry:\n sys.argv.remove('-t')\nexcept:\n pass\ntry:\n sys.argv.remove('--testing')\nexcept:\n pass\ntry:\n sys.argv.remove('-i')\nexcept:\n pass\ntry:\n sys.argv.remove('--init')\nexcept:\n pass\ntry:\n sys.argv.remove('-r')\nexcept:\n pass\ntry:\n sys.argv.remove('--reinit')\nexcept:\n pass\n<mask token>\nCORS(app)\n<mask token>\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('-t', '--testing', action='store_true')\nparser.add_argument('-i', '--init', action='store_true')\nparser.add_argument('-r', '--reinit', action='store_true')\nargs = parser.parse_known_args()\n<mask token>\ntry:\n sys.argv.remove('-t')\nexcept:\n pass\ntry:\n sys.argv.remove('--testing')\nexcept:\n pass\ntry:\n sys.argv.remove('-i')\nexcept:\n pass\ntry:\n sys.argv.remove('--init')\nexcept:\n pass\ntry:\n sys.argv.remove('-r')\nexcept:\n pass\ntry:\n sys.argv.remove('--reinit')\nexcept:\n pass\napp = Flask(__name__)\napp.config['TOKEN_SECRET'] = 'Secret_Token'\napp.config['SECRET_KEY'] = 'Secret_Key'\napp.config['CORS_HEADERS'] = ['Content-Type', 'Authorization']\napp.config['CORS_AUTOMATIC_OPTIONS'] = True\nCORS(app)\napp.config['TESTING'] = args[0].testing\napp.config['INIT'] = args[0].init\napp.config['REINIT'] = args[0].reinit\n<mask token>\ndbManager = DatabaseManager()\n<mask token>\n",
"step-4": "<mask token>\nfrom flask import Flask\nfrom flask_cors import CORS\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-t', '--testing', action='store_true')\nparser.add_argument('-i', '--init', action='store_true')\nparser.add_argument('-r', '--reinit', action='store_true')\nargs = parser.parse_known_args()\nimport sys\ntry:\n sys.argv.remove('-t')\nexcept:\n pass\ntry:\n sys.argv.remove('--testing')\nexcept:\n pass\ntry:\n sys.argv.remove('-i')\nexcept:\n pass\ntry:\n sys.argv.remove('--init')\nexcept:\n pass\ntry:\n sys.argv.remove('-r')\nexcept:\n pass\ntry:\n sys.argv.remove('--reinit')\nexcept:\n pass\napp = Flask(__name__)\napp.config['TOKEN_SECRET'] = 'Secret_Token'\napp.config['SECRET_KEY'] = 'Secret_Key'\napp.config['CORS_HEADERS'] = ['Content-Type', 'Authorization']\napp.config['CORS_AUTOMATIC_OPTIONS'] = True\nCORS(app)\napp.config['TESTING'] = args[0].testing\napp.config['INIT'] = args[0].init\napp.config['REINIT'] = args[0].reinit\nfrom SmartRecruiting_BackEnd.data import DatabaseManager\ndbManager = DatabaseManager()\nimport SmartRecruiting_BackEnd.api.routes\nimport SmartRecruiting_BackEnd.data\nimport SmartRecruiting_BackEnd.deeplearning.preprocess\n",
"step-5": "# encoding: utf-8\n# -*- coding: utf-8 -*-\n\"\"\"\nThe flask application package.\n\"\"\"\n\n#parse arguments\n\nfrom flask import Flask\nfrom flask_cors import CORS\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-t', '--testing', action='store_true') #to use the testing database\nparser.add_argument('-i', '--init', action='store_true') #to use the testing database\nparser.add_argument('-r', '--reinit', action='store_true') #to use the testing database\nargs = parser.parse_known_args()\n\n#remove arguments to not interfere with unittest\nimport sys\ntry:\n sys.argv.remove('-t')\nexcept:\n pass\ntry:\n sys.argv.remove('--testing')\nexcept:\n pass\ntry:\n sys.argv.remove('-i')\nexcept:\n pass\ntry:\n sys.argv.remove('--init')\nexcept:\n pass\ntry:\n sys.argv.remove('-r')\nexcept:\n pass\ntry:\n sys.argv.remove('--reinit')\nexcept:\n pass\n\n\napp = Flask(__name__)\napp.config['TOKEN_SECRET'] = 'Secret_Token' #Change this\napp.config['SECRET_KEY'] = 'Secret_Key' #Change this\napp.config['CORS_HEADERS'] = ['Content-Type', 'Authorization']\napp.config['CORS_AUTOMATIC_OPTIONS'] = True\nCORS(app)\n\napp.config['TESTING'] = args[0].testing\napp.config['INIT'] = args[0].init\napp.config['REINIT'] = args[0].reinit\n\nfrom SmartRecruiting_BackEnd.data import DatabaseManager\ndbManager = DatabaseManager()\n\nimport SmartRecruiting_BackEnd.api.routes\nimport SmartRecruiting_BackEnd.data\nimport SmartRecruiting_BackEnd.deeplearning.preprocess\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while i < 10:
print('Hello', 2 * i + 5)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
i = 0
while i < 10:
print('Hello', 2 * i + 5)
i = i + 1
<|reserved_special_token_1|>
i = 0
while i < 10:
print("Hello", 2 * i + 5)
i = i + 1
|
flexible
|
{
"blob_id": "e22574b5c458c23c48915274656f95a375cdc0e6",
"index": 6181,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i < 10:\n print('Hello', 2 * i + 5)\n<mask token>\n",
"step-3": "i = 0\nwhile i < 10:\n print('Hello', 2 * i + 5)\ni = i + 1\n",
"step-4": "\r\ni = 0\r\nwhile i < 10:\r\n print(\"Hello\", 2 * i + 5)\r\ni = i + 1",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Author: ulysses
Date: 1970-01-01 08:00:00
LastEditTime: 2020-08-03 15:44:57
LastEditors: Please set LastEditors
Description: Structured Streaming word count over a socket source.
'''
from pyspark.sql import SparkSession
from pyspark.sql.functions import split, explode
if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName('StructedSocketWordCount')\
        .master('local[4]')\
        .getOrCreate()
    sc =spark.sparkContext
    sc.setLogLevel('WARN')
    # Read the input stream from a socket source (localhost:9999).
    lines = spark\
        .readStream\
        .format('socket')\
        .option('host', 'localhost')\
        .option('port', 9999)\
        .load()
    words = lines.select(
        explode(
            split(lines.value, ' ') # split each line on spaces
        ).alias('word') # explode the array so each word becomes its own row
    )
    # Running (word, count) aggregation over the whole stream.
    wordcounts = words.groupBy('word').count()
    # Write the complete result table to the console every 8 seconds.
    query = wordcounts\
        .writeStream\
        .outputMode('complete')\
        .format('console')\
        .trigger(processingTime="8 seconds")\
        .start()
    query.awaitTermination()
|
normal
|
{
"blob_id": "991260c268d53fbe73e9bff9990ac536ed802d7a",
"index": 6887,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n spark = SparkSession.builder.appName('StructedSocketWordCount').master(\n 'local[4]').getOrCreate()\n sc = spark.sparkContext\n sc.setLogLevel('WARN')\n lines = spark.readStream.format('socket').option('host', 'localhost'\n ).option('port', 9999).load()\n words = lines.select(explode(split(lines.value, ' ')).alias('word'))\n wordcounts = words.groupBy('word').count()\n query = wordcounts.writeStream.outputMode('complete').format('console'\n ).trigger(processingTime='8 seconds').start()\n query.awaitTermination()\n",
"step-3": "<mask token>\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import split, explode\nif __name__ == '__main__':\n spark = SparkSession.builder.appName('StructedSocketWordCount').master(\n 'local[4]').getOrCreate()\n sc = spark.sparkContext\n sc.setLogLevel('WARN')\n lines = spark.readStream.format('socket').option('host', 'localhost'\n ).option('port', 9999).load()\n words = lines.select(explode(split(lines.value, ' ')).alias('word'))\n wordcounts = words.groupBy('word').count()\n query = wordcounts.writeStream.outputMode('complete').format('console'\n ).trigger(processingTime='8 seconds').start()\n query.awaitTermination()\n",
"step-4": "'''\nAuthor: ulysses\nDate: 1970-01-01 08:00:00\nLastEditTime: 2020-08-03 15:44:57\nLastEditors: Please set LastEditors\nDescription: \n'''\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import split, explode\n\n\nif __name__ == \"__main__\":\n spark = SparkSession\\\n .builder\\\n .appName('StructedSocketWordCount')\\\n .master('local[4]')\\\n .getOrCreate()\n \n sc =spark.sparkContext\n sc.setLogLevel('WARN')\n\n # 从socket源读取stream\n lines = spark\\\n .readStream\\\n .format('socket')\\\n .option('host', 'localhost')\\\n .option('port', 9999)\\\n .load()\n \n words = lines.select(\n explode(\n split(lines.value, ' ') # 空格拆开\n ).alias('word') # 将一行列表 打开 一列数据\n )\n # word , count\n wordcounts = words.groupBy('word').count()\n \n # 输出\n query = wordcounts\\\n .writeStream\\\n .outputMode('complete')\\\n .format('console')\\\n .trigger(processingTime=\"8 seconds\")\\\n .start()\n \n query.awaitTermination()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.0.7 on 2018-08-14 21:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the OrionAPMApplication model.

    NOTE: migration files are generated by Django (`makemigrations`);
    avoid hand-editing anything beyond comments.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('orion_integration', '0005_auto_20180814_1747'),
    ]

    operations = [
        migrations.CreateModel(
            name='OrionAPMApplication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True, db_index=True, help_text='object creation time stamp', verbose_name='created on')),
                ('updated_on', models.DateTimeField(auto_now_add=True, db_index=True, help_text='object update time stamp', verbose_name='updated on')),
                ('enabled', models.BooleanField(db_index=True, default=True, verbose_name='enabled')),
                ('notes', models.TextField(blank=True, null=True, verbose_name='notes')),
                ('orion_id', models.BigIntegerField(db_index=True, help_text='Use the value in this field to query the Orion server', unique=True, verbose_name='Orion Object Id')),
                ('application_name', models.CharField(db_index=True, help_text='The application name as reported by Orion.APM.Application', max_length=254, verbose_name='Application Name')),
                ('details_url', models.TextField(blank=True, null=True, verbose_name='Application Details URL')),
                ('full_name', models.TextField(blank=True, null=True, verbose_name='Application Fully Qualified Name')),
                ('status', models.CharField(db_index=True, max_length=254, verbose_name='Node Status')),
                ('status_orion_id', models.BigIntegerField(db_index=True, default=0, help_text='This will probably changes but that is how they do it for the moment; boohoo', verbose_name='Orion Node Status Id')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='orion_integration_orionapmapplication_created_by_related', to=settings.AUTH_USER_MODEL, verbose_name='created by')),
                ('node', models.ForeignKey(help_text='The node where the application is running', on_delete=django.db.models.deletion.CASCADE, to='orion_integration.OrionNode', verbose_name='Orion Node')),
                ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='orion_integration_orionapmapplication_updated_by_related', to=settings.AUTH_USER_MODEL, verbose_name='updated by')),
            ],
            options={
                'verbose_name': 'Orion Application',
                'verbose_name_plural': 'Orion Applications',
            },
        ),
    ]
|
normal
|
{
"blob_id": "5791c1efa82a1e02ca067e1db776e9d466a111e2",
"index": 1765,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('orion_integration', '0005_auto_20180814_1747')]\n operations = [migrations.CreateModel(name='OrionAPMApplication', fields\n =[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_on', models.\n DateTimeField(auto_now_add=True, db_index=True, help_text=\n 'object creation time stamp', verbose_name='created on')), (\n 'updated_on', models.DateTimeField(auto_now_add=True, db_index=True,\n help_text='object update time stamp', verbose_name='updated on')),\n ('enabled', models.BooleanField(db_index=True, default=True,\n verbose_name='enabled')), ('notes', models.TextField(blank=True,\n null=True, verbose_name='notes')), ('orion_id', models.\n BigIntegerField(db_index=True, help_text=\n 'Use the value in this field to query the Orion server', unique=\n True, verbose_name='Orion Object Id')), ('application_name', models\n .CharField(db_index=True, help_text=\n 'The application name as reported by Orion.APM.Application',\n max_length=254, verbose_name='Application Name')), ('details_url',\n models.TextField(blank=True, null=True, verbose_name=\n 'Application Details URL')), ('full_name', models.TextField(blank=\n True, null=True, verbose_name='Application Fully Qualified Name')),\n ('status', models.CharField(db_index=True, max_length=254,\n verbose_name='Node Status')), ('status_orion_id', models.\n BigIntegerField(db_index=True, default=0, help_text=\n 'This will probably changes but that is how they do it for the moment; boohoo'\n , verbose_name='Orion Node Status Id')), ('created_by', models.\n ForeignKey(on_delete=django.db.models.deletion.PROTECT,\n related_name=\n 'orion_integration_orionapmapplication_created_by_related', to=\n settings.AUTH_USER_MODEL, verbose_name='created by')), ('node',\n models.ForeignKey(help_text=\n 'The node where the application is 
running', on_delete=django.db.\n models.deletion.CASCADE, to='orion_integration.OrionNode',\n verbose_name='Orion Node')), ('updated_by', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, related_name=\n 'orion_integration_orionapmapplication_updated_by_related', to=\n settings.AUTH_USER_MODEL, verbose_name='updated by'))], options={\n 'verbose_name': 'Orion Application', 'verbose_name_plural':\n 'Orion Applications'})]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('orion_integration', '0005_auto_20180814_1747')]\n operations = [migrations.CreateModel(name='OrionAPMApplication', fields\n =[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_on', models.\n DateTimeField(auto_now_add=True, db_index=True, help_text=\n 'object creation time stamp', verbose_name='created on')), (\n 'updated_on', models.DateTimeField(auto_now_add=True, db_index=True,\n help_text='object update time stamp', verbose_name='updated on')),\n ('enabled', models.BooleanField(db_index=True, default=True,\n verbose_name='enabled')), ('notes', models.TextField(blank=True,\n null=True, verbose_name='notes')), ('orion_id', models.\n BigIntegerField(db_index=True, help_text=\n 'Use the value in this field to query the Orion server', unique=\n True, verbose_name='Orion Object Id')), ('application_name', models\n .CharField(db_index=True, help_text=\n 'The application name as reported by Orion.APM.Application',\n max_length=254, verbose_name='Application Name')), ('details_url',\n models.TextField(blank=True, null=True, verbose_name=\n 'Application Details URL')), ('full_name', models.TextField(blank=\n True, null=True, verbose_name='Application Fully Qualified Name')),\n ('status', models.CharField(db_index=True, max_length=254,\n verbose_name='Node Status')), ('status_orion_id', models.\n BigIntegerField(db_index=True, default=0, help_text=\n 'This will probably changes but that is how they do it for the moment; boohoo'\n , verbose_name='Orion Node Status Id')), ('created_by', models.\n ForeignKey(on_delete=django.db.models.deletion.PROTECT,\n related_name=\n 'orion_integration_orionapmapplication_created_by_related', to=\n settings.AUTH_USER_MODEL, 
verbose_name='created by')), ('node',\n models.ForeignKey(help_text=\n 'The node where the application is running', on_delete=django.db.\n models.deletion.CASCADE, to='orion_integration.OrionNode',\n verbose_name='Orion Node')), ('updated_by', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, related_name=\n 'orion_integration_orionapmapplication_updated_by_related', to=\n settings.AUTH_USER_MODEL, verbose_name='updated by'))], options={\n 'verbose_name': 'Orion Application', 'verbose_name_plural':\n 'Orion Applications'})]\n",
"step-5": "# Generated by Django 2.0.7 on 2018-08-14 21:31\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('orion_integration', '0005_auto_20180814_1747'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='OrionAPMApplication',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_on', models.DateTimeField(auto_now_add=True, db_index=True, help_text='object creation time stamp', verbose_name='created on')),\n ('updated_on', models.DateTimeField(auto_now_add=True, db_index=True, help_text='object update time stamp', verbose_name='updated on')),\n ('enabled', models.BooleanField(db_index=True, default=True, verbose_name='enabled')),\n ('notes', models.TextField(blank=True, null=True, verbose_name='notes')),\n ('orion_id', models.BigIntegerField(db_index=True, help_text='Use the value in this field to query the Orion server', unique=True, verbose_name='Orion Object Id')),\n ('application_name', models.CharField(db_index=True, help_text='The application name as reported by Orion.APM.Application', max_length=254, verbose_name='Application Name')),\n ('details_url', models.TextField(blank=True, null=True, verbose_name='Application Details URL')),\n ('full_name', models.TextField(blank=True, null=True, verbose_name='Application Fully Qualified Name')),\n ('status', models.CharField(db_index=True, max_length=254, verbose_name='Node Status')),\n ('status_orion_id', models.BigIntegerField(db_index=True, default=0, help_text='This will probably changes but that is how they do it for the moment; boohoo', verbose_name='Orion Node Status Id')),\n ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='orion_integration_orionapmapplication_created_by_related', 
to=settings.AUTH_USER_MODEL, verbose_name='created by')),\n ('node', models.ForeignKey(help_text='The node where the application is running', on_delete=django.db.models.deletion.CASCADE, to='orion_integration.OrionNode', verbose_name='Orion Node')),\n ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='orion_integration_orionapmapplication_updated_by_related', to=settings.AUTH_USER_MODEL, verbose_name='updated by')),\n ],\n options={\n 'verbose_name': 'Orion Application',\n 'verbose_name_plural': 'Orion Applications',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def h1_wrap(func):
    """Decorator that wraps *func*'s string result in <h1>...</h1> tags."""
    def func_wrapper(*args, **kwargs):
        # Forward all arguments so the decorator also works for functions
        # with more than one parameter (the original forwarded exactly one).
        return "<h1>" + func(*args, **kwargs) + "</h1>"
    return func_wrapper


@h1_wrap
def say_hi(name):
    """Return an HTML greeting for *name*, capitalized."""
    return "Hello, " + name.capitalize()


print(say_hi("Stephan"))
|
normal
|
{
"blob_id": "9c9005acb40e4b89ca215345361e21f08f984847",
"index": 5735,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@h1_wrap\ndef say_hi(name):\n return 'Hello, ' + name.capitalize()\n\n\n<mask token>\n",
"step-3": "def h1_wrap(func):\n\n def func_wrapper(param):\n return '<h1>' + func(param) + '</h1>'\n return func_wrapper\n\n\n@h1_wrap\ndef say_hi(name):\n return 'Hello, ' + name.capitalize()\n\n\n<mask token>\n",
"step-4": "def h1_wrap(func):\n\n def func_wrapper(param):\n return '<h1>' + func(param) + '</h1>'\n return func_wrapper\n\n\n@h1_wrap\ndef say_hi(name):\n return 'Hello, ' + name.capitalize()\n\n\nprint(say_hi('Stephan'))\n",
"step-5": "def h1_wrap(func):\n def func_wrapper(param):\n return \"<h1>\"+func(param) + \"</h1>\"\n return func_wrapper\n\n\n@h1_wrap\ndef say_hi(name):\n return \"Hello, \" + name.capitalize()\n\n\nprint(say_hi(\"Stephan\"))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from flask import Flask
# NOTE(review): the flask.ext.* namespace was removed in Flask 1.0; these
# imports only work on old Flask versions — confirm before upgrading Flask.
from flask.ext.login import LoginManager
from config import basedir
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.openid import OpenID
from momentjs import momentjs
# Module-level application globals used throughout the package.
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
# Expose the momentjs helper to every Jinja template.
app.jinja_env.globals['momentjs'] = momentjs
# Imported last, for side effects, to avoid a circular import:
# views/models themselves import `app` and `db` from this module.
from app import views, models
|
normal
|
{
"blob_id": "8c1bd4df5f33c433880d6a4becadf88fb922762b",
"index": 6379,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config.from_object('config')\n<mask token>\nlm.init_app(app)\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\nlm = LoginManager()\nlm.init_app(app)\napp.jinja_env.globals['momentjs'] = momentjs\n<mask token>\n",
"step-4": "import os\nfrom flask import Flask\nfrom flask.ext.login import LoginManager\nfrom config import basedir\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.openid import OpenID\nfrom momentjs import momentjs\napp = Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\nlm = LoginManager()\nlm.init_app(app)\napp.jinja_env.globals['momentjs'] = momentjs\nfrom app import views, models\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
text = input('Введите предложение: ')
x1 = text.index('с')
x2 = text.index('т')
if x1 > x2:
print("Бурква 'с' встречается позже")
else:
print("Бурква 'т' встречается позже")
<|reserved_special_token_1|>
'''
Дано предложение, в котором имеются буквы с и т. Определить, какая из них встречается
позже (при просмотре слова слева направо). Если таких букв несколько, то должны
учитываться последние из них. Оператор цикла с условием не использовать.
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
    text = input("Введите предложение: ")

    # The task statement requires comparing the *last* occurrence of each
    # letter, so use rindex(); str.index() would find the first occurrence.
    # Both raise ValueError if the letter is absent, as before.
    x1 = text.rindex("с")
    x2 = text.rindex("т")
    # NOTE(review): "Бурква" looks like a typo for "Буква" — confirm before
    # changing the user-facing text.
    if x1 > x2:
        print("Бурква 'с' встречается позже")
    else:
        print("Бурква 'т' встречается позже")
|
flexible
|
{
"blob_id": "4bad45f8c135463fadea9b3eed52ab045a51e8db",
"index": 2520,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n text = input('Введите предложение: ')\n x1 = text.index('с')\n x2 = text.index('т')\n if x1 > x2:\n print(\"Бурква 'с' встречается позже\")\n else:\n print(\"Бурква 'т' встречается позже\")\n",
"step-3": "'''\nДано предложение, в котором имеются буквы с и т. Определить, какая из них встречается\nпозже (при просмотре слова слева направо). Если таких букв несколько, то должны\nучитываться последние из них. Оператор цикла с условием не использовать.\n'''\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nif __name__ == '__main__':\n text = input(\"Введите предложение: \")\n\n x1 = text.index(\"с\")\n x2 = text.index(\"т\")\n if x1 > x2:\n print(\"Бурква 'с' встречается позже\")\n else:\n print(\"Бурква 'т' встречается позже\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AssetModelTestCase(AssetTestMixin, BaseTestCase):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AssetModelTestCase(AssetTestMixin, BaseTestCase):
def test_asset_get_absolute_url_method(self):
self._create_test_asset()
self.test_asset.get_absolute_url()
<|reserved_special_token_1|>
from mayan.apps.testing.tests.base import BaseTestCase
from .mixins import AssetTestMixin
class AssetModelTestCase(AssetTestMixin, BaseTestCase):
def test_asset_get_absolute_url_method(self):
self._create_test_asset()
self.test_asset.get_absolute_url()
|
flexible
|
{
"blob_id": "42c9e5039e2d5f784bf6405ea8bcaf7d6973ddcb",
"index": 6456,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AssetModelTestCase(AssetTestMixin, BaseTestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AssetModelTestCase(AssetTestMixin, BaseTestCase):\n\n def test_asset_get_absolute_url_method(self):\n self._create_test_asset()\n self.test_asset.get_absolute_url()\n",
"step-4": "from mayan.apps.testing.tests.base import BaseTestCase\nfrom .mixins import AssetTestMixin\n\n\nclass AssetModelTestCase(AssetTestMixin, BaseTestCase):\n\n def test_asset_get_absolute_url_method(self):\n self._create_test_asset()\n self.test_asset.get_absolute_url()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
<|reserved_special_token_0|>
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
<|reserved_special_token_0|>
if cfg.MULTIPLE_ARCHITECTURES:
best_architecture = []
best_regularizer = ''
best_activation_function = ''
best_precision = 0
best_recall = 0
best_f1 = 0
count_max = 0
counter = 0
architecture_list = []
for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):
prod = list(product(cfg.TEST_NOTES, repeat=i))
architecture_list.extend(prod)
count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg
.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)
with open('output/wrapper_test_mean.csv', 'a') as f:
f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n')
for architecture in architecture_list:
for regularizer in cfg.TEST_REGULARIZERS:
for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:
for class_weight in cfg.TEST_CLASS_WEIGHTS:
reset_keras()
print(str(counter) + '/' + str(count_max))
model = BuildModel(cfg, input_shape, True, list(
architecture), regularizer, activation_function)
model_trained = TrainModel(cfg, model,
training_features, training_targets, {(0): 1.0,
(1): class_weight})
precision, recall, f1 = EvaluateModelTest(cfg,
model_trained, test_features, test_targets)
if recall > best_recall:
best_precision = precision
best_recall = recall
best_f1 = f1
best_architecture = list(architecture)
best_regularizer = regularizer
best_activation_function = activation_function
la1 = list(architecture)[0]
la2 = 0
la3 = 0
la4 = 0
la5 = 0
if len(list(architecture)) >= 2:
la2 = list(architecture)[1]
if len(list(architecture)) >= 3:
la3 = list(architecture)[2]
if len(list(architecture)) >= 4:
la4 = list(architecture)[3]
if len(list(architecture)) >= 5:
la5 = list(architecture)[4]
f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +
',' + str(la4) + ',' + str(la5) + ',' + str(
class_weight) + ',' + regularizer + ',' +
activation_function + ',' + str(precision) +
',' + str(recall) + ',' + str(f1) + '\n')
counter += 1
print('BEST ARCHITECTURE:')
print(best_architecture)
print(best_regularizer)
print(best_activation_function)
print('precision: ' + str(best_precision) + ', recall: ' + str(
best_recall) + ', f1: ' + str(best_f1))
else:
reset_keras()
model = BuildModel(cfg, input_shape, False, 0, 0, 0)
model = TrainModel(cfg, model, training_features, training_targets, cfg
.CLASS_WEIGHT)
EvaluateModel(cfg, model, test_features, test_targets)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cfg = Config()
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
training_X, training_y, test_X, test_Y = FetchData(cfg)
training_features = np.array(training_X)
training_targets = np.array(training_y)
test_features = np.array(test_X)
test_targets = np.array(test_Y)
input_shape = len(training_features[0]),
if cfg.MULTIPLE_ARCHITECTURES:
best_architecture = []
best_regularizer = ''
best_activation_function = ''
best_precision = 0
best_recall = 0
best_f1 = 0
count_max = 0
counter = 0
architecture_list = []
for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):
prod = list(product(cfg.TEST_NOTES, repeat=i))
architecture_list.extend(prod)
count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg
.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)
with open('output/wrapper_test_mean.csv', 'a') as f:
f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n')
for architecture in architecture_list:
for regularizer in cfg.TEST_REGULARIZERS:
for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:
for class_weight in cfg.TEST_CLASS_WEIGHTS:
reset_keras()
print(str(counter) + '/' + str(count_max))
model = BuildModel(cfg, input_shape, True, list(
architecture), regularizer, activation_function)
model_trained = TrainModel(cfg, model,
training_features, training_targets, {(0): 1.0,
(1): class_weight})
precision, recall, f1 = EvaluateModelTest(cfg,
model_trained, test_features, test_targets)
if recall > best_recall:
best_precision = precision
best_recall = recall
best_f1 = f1
best_architecture = list(architecture)
best_regularizer = regularizer
best_activation_function = activation_function
la1 = list(architecture)[0]
la2 = 0
la3 = 0
la4 = 0
la5 = 0
if len(list(architecture)) >= 2:
la2 = list(architecture)[1]
if len(list(architecture)) >= 3:
la3 = list(architecture)[2]
if len(list(architecture)) >= 4:
la4 = list(architecture)[3]
if len(list(architecture)) >= 5:
la5 = list(architecture)[4]
f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +
',' + str(la4) + ',' + str(la5) + ',' + str(
class_weight) + ',' + regularizer + ',' +
activation_function + ',' + str(precision) +
',' + str(recall) + ',' + str(f1) + '\n')
counter += 1
print('BEST ARCHITECTURE:')
print(best_architecture)
print(best_regularizer)
print(best_activation_function)
print('precision: ' + str(best_precision) + ', recall: ' + str(
best_recall) + ', f1: ' + str(best_f1))
else:
reset_keras()
model = BuildModel(cfg, input_shape, False, 0, 0, 0)
model = TrainModel(cfg, model, training_features, training_targets, cfg
.CLASS_WEIGHT)
EvaluateModel(cfg, model, test_features, test_targets)
<|reserved_special_token_1|>
from config import Config
import numpy as np
from itertools import product
from sklearn.utils import shuffle
from sklearn.metrics import precision_recall_fscore_support
from keras import callbacks, regularizers
from keras.models import Sequential
from keras.layers import Dense, InputLayer
from keras import backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
from src.classification_data_tools import limit_negative_samples
import pickle
from tensorflow import set_random_seed
import tensorflow as tf
cfg = Config()
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer, activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS, callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS, class_weight=cw,
batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')
f1 = 2 * ((precision * recall) / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')
f1 = 2 * ((precision * recall) / (precision + recall))
return precision, recall, f1
#estimator = KerasClassifier(build_fn=model, epochs=4, batch_size=32, verbose=1)
#kfold = StratifiedKFold(n_splits=10, shuffle=True)
#results = cross_val_score(estimator, test_features, test_targets, cv=kfold)
#print("Results: %.2f%% (%.2f%%)" % (results.mean() * 100, results.std() * 100))
training_X, training_y, test_X, test_Y = FetchData(cfg)
training_features = np.array(training_X)
training_targets = np.array(training_y)
test_features = np.array(test_X)
test_targets = np.array(test_Y)
input_shape = (len(training_features[0]),)
if cfg.MULTIPLE_ARCHITECTURES:
best_architecture = []
best_regularizer = ''
best_activation_function = ''
best_precision = 0
best_recall = 0
best_f1 = 0
count_max = 0
counter = 0
architecture_list = []
for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):
prod = list(product(cfg.TEST_NOTES, repeat = i))
architecture_list.extend(prod)
count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)
with open('output/wrapper_test_mean.csv', 'a') as f:
f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n')
for architecture in architecture_list:
for regularizer in cfg.TEST_REGULARIZERS:
for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:
for class_weight in cfg.TEST_CLASS_WEIGHTS:
reset_keras()
print(str(counter) + '/' + str(count_max))
model = BuildModel(cfg, input_shape, True, list(architecture), regularizer, activation_function)
model_trained = TrainModel(cfg, model, training_features, training_targets, {0: 1., 1: class_weight})
precision, recall, f1 = EvaluateModelTest(cfg, model_trained, test_features, test_targets)
if recall > best_recall:
best_precision = precision
best_recall = recall
best_f1 = f1
best_architecture = list(architecture)
best_regularizer = regularizer
best_activation_function = activation_function
la1 = list(architecture)[0]
la2 = 0
la3 = 0
la4 = 0
la5 = 0
if len(list(architecture)) >= 2:
la2 = list(architecture)[1]
if len(list(architecture)) >= 3:
la3 = list(architecture)[2]
if len(list(architecture)) >= 4:
la4 = list(architecture)[3]
if len(list(architecture)) >= 5:
la5 = list(architecture)[4]
f.write(str(la1) + ',' + str(la2) + ',' + str(la3) + ',' + str(la4) + ',' + str(la5) + ',' + str(class_weight) + ',' + regularizer + ',' + activation_function + ',' + str(precision) + ',' + str(recall) + ',' + str(f1) + '\n')
counter += 1
print('BEST ARCHITECTURE:')
print(best_architecture)
print(best_regularizer)
print(best_activation_function)
print('precision: ' + str(best_precision) + ', recall: ' + str(best_recall) + ', f1: ' + str(best_f1))
else:
reset_keras()
model = BuildModel(cfg, input_shape, False, 0, 0, 0)
model = TrainModel(cfg, model, training_features, training_targets, cfg.CLASS_WEIGHT)
EvaluateModel(cfg, model, test_features, test_targets)
|
flexible
|
{
"blob_id": "957e18b2536cda69ba1db571d0308d5e392fe488",
"index": 2166,
"step-1": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n 
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\n<mask token>\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n 
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n 
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n count_max = 0\n counter = 0\n architecture_list = []\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat=i))\n architecture_list.extend(prod)\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg\n .TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n with open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in 
cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n print(str(counter) + '/' + str(count_max))\n model = BuildModel(cfg, input_shape, True, list(\n architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model,\n training_features, training_targets, {(0): 1.0,\n (1): class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg,\n model_trained, test_features, test_targets)\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +\n ',' + str(la4) + ',' + str(la5) + ',' + str(\n class_weight) + ',' + regularizer + ',' +\n activation_function + ',' + str(precision) +\n ',' + str(recall) + ',' + str(f1) + '\\n')\n counter += 1\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(\n best_recall) + ', f1: ' + str(best_f1))\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg\n .CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n",
"step-4": "<mask token>\ncfg = Config()\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n 
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\ntraining_X, training_y, test_X, test_Y = FetchData(cfg)\ntraining_features = np.array(training_X)\ntraining_targets = np.array(training_y)\ntest_features = np.array(test_X)\ntest_targets = np.array(test_Y)\ninput_shape = len(training_features[0]),\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n count_max = 0\n counter = 0\n architecture_list = []\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat=i))\n architecture_list.extend(prod)\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg\n .TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n with 
open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n print(str(counter) + '/' + str(count_max))\n model = BuildModel(cfg, input_shape, True, list(\n architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model,\n training_features, training_targets, {(0): 1.0,\n (1): class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg,\n model_trained, test_features, test_targets)\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +\n ',' + str(la4) + ',' + str(la5) + ',' + str(\n class_weight) + ',' + regularizer + ',' +\n activation_function + ',' + str(precision) +\n ',' + str(recall) + ',' + str(f1) + '\\n')\n counter += 1\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(\n best_recall) + ', f1: ' + str(best_f1))\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg\n .CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n",
"step-5": "from config import Config\nimport numpy as np\nfrom itertools import product\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom keras import callbacks, regularizers\nfrom keras.models import Sequential\nfrom keras.layers import Dense, InputLayer\nfrom keras import backend as K\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nfrom src.classification_data_tools import limit_negative_samples\nimport pickle\nfrom tensorflow import set_random_seed\nimport tensorflow as tf\n\ncfg = Config()\n\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO)\n\n return training_features, training_targets, test_features, test_targets\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer, activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n\n model = Sequential()\n\n model.add(InputLayer(input_shape))\n\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=cfg.REGULARIZER, 
activation=cfg.ACTIVATION_FUNCTION))\n\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n\n return model\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS, callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS, class_weight=cw,\n batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n\n return model\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n\n precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')\n f1 = 2 * ((precision * recall) / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n\n precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')\n f1 = 2 * ((precision * recall) / (precision + recall))\n return precision, recall, f1\n\n #estimator = KerasClassifier(build_fn=model, epochs=4, batch_size=32, verbose=1)\n #kfold = StratifiedKFold(n_splits=10, shuffle=True)\n #results = 
cross_val_score(estimator, test_features, test_targets, cv=kfold)\n #print(\"Results: %.2f%% (%.2f%%)\" % (results.mean() * 100, results.std() * 100))\n\n\ntraining_X, training_y, test_X, test_Y = FetchData(cfg)\n\ntraining_features = np.array(training_X)\ntraining_targets = np.array(training_y)\ntest_features = np.array(test_X)\ntest_targets = np.array(test_Y)\n\ninput_shape = (len(training_features[0]),)\n\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n\n count_max = 0\n counter = 0\n\n architecture_list = []\n\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat = i))\n architecture_list.extend(prod)\n\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n\n with open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n\n print(str(counter) + '/' + str(count_max))\n\n model = BuildModel(cfg, input_shape, True, list(architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model, training_features, training_targets, {0: 1., 1: class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg, model_trained, test_features, test_targets)\n\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n\n\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n 
la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) + ',' + str(la4) + ',' + str(la5) + ',' + str(class_weight) + ',' + regularizer + ',' + activation_function + ',' + str(precision) + ',' + str(recall) + ',' + str(f1) + '\\n')\n\n counter += 1\n\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(best_recall) + ', f1: ' + str(best_f1))\n\n\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg.CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n\n\n\n\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
import math

# Sieve of Eratosthenes over the candidate range [M, N].
# NOTE: M = 3, so 2 is deliberately excluded from the candidates.
M, N = 3, 16
prime = set(range(M, N + 1))
# Striking multiples of i is only necessary for i <= sqrt(N): every
# composite <= N has a prime factor <= sqrt(N).  (The original looped
# i all the way up to N, doing redundant passes.)
for i in range(2, math.isqrt(N) + 1):
    prime -= set(range(i ** 2, N + 1, i))
# Print in sorted order so the output is deterministic instead of
# depending on set iteration order.
for number in sorted(prime):
    print(number)
|
normal
|
{
"blob_id": "d190eb27ea146cf99ac7f8d29fb5f769121af60e",
"index": 9437,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2, N + 1):\n prime -= set(range(i ** 2, N + 1, i))\nfor number in prime:\n print(number)\n",
"step-3": "M, N = 3, 16\nprime = set(range(M, N + 1))\nfor i in range(2, N + 1):\n prime -= set(range(i ** 2, N + 1, i))\nfor number in prime:\n print(number)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@pytest.fixture
def client():
app.app.config['TESTING'] = True
with app.app.test_client() as client:
yield client
def test_query_missing_args(client):
response = client.get('/data/query')
assert 'errors' in response.json and '400' in response.status
def test_query_get_json(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('select * from test')}")
assert len(response.json) == 2
def test_query_post_json(client):
response = client.post('/data/query', json={'sql': 'select * from test'})
assert len(response.json) == 2
<|reserved_special_token_0|>
def test_query_bad_sql_insert(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
def test_query_bad_sql_delete(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def client():
app.app.config['TESTING'] = True
with app.app.test_client() as client:
yield client
def test_query_missing_args(client):
response = client.get('/data/query')
assert 'errors' in response.json and '400' in response.status
def test_query_get_json(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('select * from test')}")
assert len(response.json) == 2
def test_query_post_json(client):
response = client.post('/data/query', json={'sql': 'select * from test'})
assert len(response.json) == 2
<|reserved_special_token_0|>
def test_query_bad_sql_insert(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
def test_query_bad_sql_delete(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
def test_query_bad_sql_update(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('update test set col0 = 1')}")
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def client():
app.app.config['TESTING'] = True
with app.app.test_client() as client:
yield client
def test_query_missing_args(client):
response = client.get('/data/query')
assert 'errors' in response.json and '400' in response.status
def test_query_get_json(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('select * from test')}")
assert len(response.json) == 2
def test_query_post_json(client):
response = client.post('/data/query', json={'sql': 'select * from test'})
assert len(response.json) == 2
def test_query_get_csv(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('select * from test')}&format=csv"
)
text = response.data.decode()
assert len(text) > 0 and 'col0' in text
def test_query_post_csv(client):
response = client.post('/data/query', json={'sql': 'select * from test',
'format': 'csv'})
text = response.data.decode()
assert len(text) > 0 and 'col0' in text
def test_query_bad_sql_insert(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
def test_query_bad_sql_delete(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
def test_query_bad_sql_update(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('update test set col0 = 1')}")
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
<|reserved_special_token_1|>
import pytest
import app
import urllib.parse
@pytest.fixture
def client():
app.app.config['TESTING'] = True
with app.app.test_client() as client:
yield client
def test_query_missing_args(client):
response = client.get('/data/query')
assert 'errors' in response.json and '400' in response.status
def test_query_get_json(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('select * from test')}")
assert len(response.json) == 2
def test_query_post_json(client):
response = client.post('/data/query', json={'sql': 'select * from test'})
assert len(response.json) == 2
def test_query_get_csv(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('select * from test')}&format=csv"
)
text = response.data.decode()
assert len(text) > 0 and 'col0' in text
def test_query_post_csv(client):
response = client.post('/data/query', json={'sql': 'select * from test',
'format': 'csv'})
text = response.data.decode()
assert len(text) > 0 and 'col0' in text
def test_query_bad_sql_insert(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
def test_query_bad_sql_delete(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}"
)
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
def test_query_bad_sql_update(client):
response = client.get(
f"/data/query?sql={urllib.parse.quote('update test set col0 = 1')}")
assert 'Illegal SQL' in response.json['message'
] and 400 == response.status_code
<|reserved_special_token_1|>
import pytest
import app
import urllib.parse
@pytest.fixture
def client():
    """Yield a Flask test client for the application with TESTING enabled."""
    app.app.config['TESTING'] = True
    with app.app.test_client() as test_client:
        yield test_client
def test_query_missing_args(client):
    """A GET with no ``sql`` argument is rejected with HTTP 400."""
    response = client.get('/data/query')
    # Error payloads carry an 'errors' mapping describing the missing args.
    assert 'errors' in response.json
    # Compare the numeric status code — consistent with the other tests in
    # this file — rather than substring-matching the textual status line.
    assert response.status_code == 400
def test_query_get_json(client):
    """A GET carrying a SELECT in the query string returns rows as JSON."""
    sql = urllib.parse.quote('select * from test')
    response = client.get('/data/query?sql=' + sql)
    assert len(response.json) == 2
def test_query_post_json(client):
    """A POST with the SELECT in the JSON body returns rows as JSON."""
    payload = {'sql': 'select * from test'}
    response = client.post('/data/query', json=payload)
    assert 2 == len(response.json)
def test_query_get_csv(client):
    """A GET with ``format=csv`` returns non-empty CSV text with a header."""
    sql = urllib.parse.quote('select * from test')
    response = client.get('/data/query?sql=' + sql + '&format=csv')
    body = response.data.decode()
    assert len(body) > 0
    assert 'col0' in body
def test_query_post_csv(client):
    """A POST requesting CSV output returns non-empty text with a header."""
    payload = {'sql': 'select * from test', 'format': 'csv'}
    response = client.post('/data/query', json=payload)
    body = response.data.decode()
    assert len(body) > 0
    assert 'col0' in body
def test_query_bad_sql_insert(client):
    """Non-SELECT statements (INSERT) are refused with HTTP 400."""
    sql = urllib.parse.quote('insert into test (col0) values (1)')
    response = client.get('/data/query?sql=' + sql)
    assert response.status_code == 400
    assert 'Illegal SQL' in response.json['message']
def test_query_bad_sql_delete(client):
    """Non-SELECT statements (DELETE) are refused with HTTP 400."""
    sql = urllib.parse.quote('delete from test where col0 = 1')
    response = client.get('/data/query?sql=' + sql)
    assert response.status_code == 400
    assert 'Illegal SQL' in response.json['message']
def test_query_bad_sql_update(client):
    """UPDATE statements are rejected with a 400 'Illegal SQL' response."""
    encoded = urllib.parse.quote('update test set col0 = 1')
    response = client.get('/data/query?sql=' + encoded)
    assert 400 == response.status_code
    assert 'Illegal SQL' in response.json['message']
|
flexible
|
{
"blob_id": "a598da0a749fcc5a6719cec31ede0eb13fab228e",
"index": 3171,
"step-1": "<mask token>\n\n\n@pytest.fixture\ndef client():\n app.app.config['TESTING'] = True\n with app.app.test_client() as client:\n yield client\n\n\ndef test_query_missing_args(client):\n response = client.get('/data/query')\n assert 'errors' in response.json and '400' in response.status\n\n\ndef test_query_get_json(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('select * from test')}\")\n assert len(response.json) == 2\n\n\ndef test_query_post_json(client):\n response = client.post('/data/query', json={'sql': 'select * from test'})\n assert len(response.json) == 2\n\n\n<mask token>\n\n\ndef test_query_bad_sql_insert(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\ndef test_query_bad_sql_delete(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef client():\n app.app.config['TESTING'] = True\n with app.app.test_client() as client:\n yield client\n\n\ndef test_query_missing_args(client):\n response = client.get('/data/query')\n assert 'errors' in response.json and '400' in response.status\n\n\ndef test_query_get_json(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('select * from test')}\")\n assert len(response.json) == 2\n\n\ndef test_query_post_json(client):\n response = client.post('/data/query', json={'sql': 'select * from test'})\n assert len(response.json) == 2\n\n\n<mask token>\n\n\ndef test_query_bad_sql_insert(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\ndef test_query_bad_sql_delete(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\ndef test_query_bad_sql_update(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('update test set col0 = 1')}\")\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n",
"step-3": "<mask token>\n\n\n@pytest.fixture\ndef client():\n app.app.config['TESTING'] = True\n with app.app.test_client() as client:\n yield client\n\n\ndef test_query_missing_args(client):\n response = client.get('/data/query')\n assert 'errors' in response.json and '400' in response.status\n\n\ndef test_query_get_json(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('select * from test')}\")\n assert len(response.json) == 2\n\n\ndef test_query_post_json(client):\n response = client.post('/data/query', json={'sql': 'select * from test'})\n assert len(response.json) == 2\n\n\ndef test_query_get_csv(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('select * from test')}&format=csv\"\n )\n text = response.data.decode()\n assert len(text) > 0 and 'col0' in text\n\n\ndef test_query_post_csv(client):\n response = client.post('/data/query', json={'sql': 'select * from test',\n 'format': 'csv'})\n text = response.data.decode()\n assert len(text) > 0 and 'col0' in text\n\n\ndef test_query_bad_sql_insert(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\ndef test_query_bad_sql_delete(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\ndef test_query_bad_sql_update(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('update test set col0 = 1')}\")\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n",
"step-4": "import pytest\nimport app\nimport urllib.parse\n\n\n@pytest.fixture\ndef client():\n app.app.config['TESTING'] = True\n with app.app.test_client() as client:\n yield client\n\n\ndef test_query_missing_args(client):\n response = client.get('/data/query')\n assert 'errors' in response.json and '400' in response.status\n\n\ndef test_query_get_json(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('select * from test')}\")\n assert len(response.json) == 2\n\n\ndef test_query_post_json(client):\n response = client.post('/data/query', json={'sql': 'select * from test'})\n assert len(response.json) == 2\n\n\ndef test_query_get_csv(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('select * from test')}&format=csv\"\n )\n text = response.data.decode()\n assert len(text) > 0 and 'col0' in text\n\n\ndef test_query_post_csv(client):\n response = client.post('/data/query', json={'sql': 'select * from test',\n 'format': 'csv'})\n text = response.data.decode()\n assert len(text) > 0 and 'col0' in text\n\n\ndef test_query_bad_sql_insert(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('insert into test (col0) values (1)')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\ndef test_query_bad_sql_delete(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('delete from test where col0 = 1')}\"\n )\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n\n\ndef test_query_bad_sql_update(client):\n response = client.get(\n f\"/data/query?sql={urllib.parse.quote('update test set col0 = 1')}\")\n assert 'Illegal SQL' in response.json['message'\n ] and 400 == response.status_code\n",
"step-5": "import pytest\nimport app\nimport urllib.parse\n\n\n@pytest.fixture\ndef client():\n app.app.config['TESTING'] = True\n\n with app.app.test_client() as client:\n yield client\n\n\ndef test_query_missing_args(client):\n response = client.get('/data/query')\n assert 'errors' in response.json and '400' in response.status\n\n\ndef test_query_get_json(client):\n response = client.get(f'/data/query?sql={urllib.parse.quote(\"select * from test\")}')\n assert len(response.json) == 2\n\n\ndef test_query_post_json(client):\n response = client.post('/data/query', json={'sql': 'select * from test'})\n assert len(response.json) == 2\n\n\ndef test_query_get_csv(client):\n response = client.get(f'/data/query?sql={urllib.parse.quote(\"select * from test\")}&format=csv')\n text = response.data.decode()\n assert len(text) > 0 and 'col0' in text\n\n\ndef test_query_post_csv(client):\n response = client.post('/data/query', json={'sql': 'select * from test', 'format': 'csv'})\n text = response.data.decode()\n assert len(text) > 0 and 'col0' in text\n\n\ndef test_query_bad_sql_insert(client):\n response = client.get(f'/data/query?sql={urllib.parse.quote(\"insert into test (col0) values (1)\")}')\n assert 'Illegal SQL' in response.json['message'] and 400 == response.status_code\n\n\ndef test_query_bad_sql_delete(client):\n response = client.get(f'/data/query?sql={urllib.parse.quote(\"delete from test where col0 = 1\")}')\n assert 'Illegal SQL' in response.json['message'] and 400 == response.status_code\n\n\ndef test_query_bad_sql_update(client):\n response = client.get(f'/data/query?sql={urllib.parse.quote(\"update test set col0 = 1\")}')\n assert 'Illegal SQL' in response.json['message'] and 400 == response.status_code\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
# https://stackoverflow.com/questions/69473844/can-you-calculate-the-size-of-a-text-annotation-in-matplotlib
from matplotlib.figure import Figure as mpl_Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas
figure = mpl_Figure()
x, y, text = 5, 7, 'My label text'

axes = figure.gca()  # gca() hands back the same Axes on every call
axes.plot(x, y, 'k.')

canvas = mpl_Canvas(figure)
label = axes.text(x, y, text, color='red')
canvas.draw()  # the text extent is only known after a draw

# bounding box of the rendered label, in display (pixel) coordinates
bbox = label.get_window_extent(renderer=canvas.get_renderer())
corner_xs = [bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0]
corner_ys = [bbox.y0, bbox.y0, bbox.y1, bbox.y1, bbox.y0]
axes.plot(corner_xs, corner_ys, 'k:', transform=None)  # transform=None: raw pixel coords

canvas.print_figure("img.png")
|
normal
|
{
"blob_id": "c87f9885e96abdd32df68f9fe1942b2782bd5b96",
"index": 8149,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfig.gca().plot(x, y, 'k.')\n<mask token>\ncanvas.draw()\n<mask token>\nfig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,\n bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)\ncanvas.print_figure('img.png')\n",
"step-3": "<mask token>\nfig = mpl_Figure()\nx, y, text = 5, 7, 'My label text'\nfig.gca().plot(x, y, 'k.')\ncanvas = mpl_Canvas(fig)\nt = fig.gca().text(x, y, text, color='red')\ncanvas.draw()\nbbox = t.get_window_extent(renderer=canvas.get_renderer())\nfig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,\n bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)\ncanvas.print_figure('img.png')\n",
"step-4": "from matplotlib.figure import Figure as mpl_Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas\nfig = mpl_Figure()\nx, y, text = 5, 7, 'My label text'\nfig.gca().plot(x, y, 'k.')\ncanvas = mpl_Canvas(fig)\nt = fig.gca().text(x, y, text, color='red')\ncanvas.draw()\nbbox = t.get_window_extent(renderer=canvas.get_renderer())\nfig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,\n bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)\ncanvas.print_figure('img.png')\n",
"step-5": "# https://stackoverflow.com/questions/69473844/can-you-calculate-the-size-of-a-text-annotation-in-matplotlib\n\nfrom matplotlib.figure import Figure as mpl_Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas\n\nfig = mpl_Figure()\nx, y, text = 5, 7, 'My label text'\n\nfig.gca().plot(x, y, 'k.')\ncanvas = mpl_Canvas(fig)\nt = fig.gca().text(x, y, text, color='red')\ncanvas.draw()\n\nbbox = t.get_window_extent(renderer = canvas.get_renderer())\nfig.gca().plot(\n [bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0],\n [bbox.y0, bbox.y0, bbox.y1, bbox.y1, bbox.y0],\n 'k:',\n transform=None)\ncanvas.print_figure(\"img.png\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def parse_slack_message_object(message_obj):
    """Build a flat metadata dict (user/channel/team names) from a slack message.

    Notes:
        `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]

    Args:
        message_obj (:obj:`slackbot.message`): response object for slack

    Returns:
        dict: message data

    """
    info = dict(message_obj._body)
    client = message_obj._client
    try:
        info['channel_name'] = client.channels[info['channel']]['name']
    except KeyError:
        # channel id not in the channel list -> treat as a direct message
        info['channel_name'] = 'DIRECT_MESSAGE:{}'.format(
            client.users[info['user']]['name'])
    info['user_name'] = client.users[info['user']]['name']
    info['team_name'] = client.login_data['team']['name']
    return info
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_slack_message_object(message_obj):
    """Build a flat metadata dict (user/channel/team names) from a slack message.

    Notes:
        `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]

    Args:
        message_obj (:obj:`slackbot.message`): response object for slack

    Returns:
        dict: message data

    """
    info = dict(message_obj._body)
    client = message_obj._client
    try:
        info['channel_name'] = client.channels[info['channel']]['name']
    except KeyError:
        # channel id not in the channel list -> treat as a direct message
        info['channel_name'] = 'DIRECT_MESSAGE:{}'.format(
            client.users[info['user']]['name'])
    info['user_name'] = client.users[info['user']]['name']
    info['team_name'] = client.login_data['team']['name']
    return info
def parse_discord_context_object(context_obj):
    """Build a standardized metadata dict from a discord command context.

    Args:
        context_obj (:obj:`discord.context`): response object for discord

    Returns:
        dict: standardized message data

    """
    message = context_obj.message
    result = {}
    result['user_name'] = message.author.name
    result['team_name'] = message.server.name
    try:
        result['channel_name'] = message.channel.name
    except Exception:
        # direct messages have no named channel
        result['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message.author.name)
    return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
HERE = path.abspath(path.dirname(__file__))  # absolute directory of this module
PP = pprint.PrettyPrinter(indent=2)  # shared pretty-printer for debug dumps
def parse_slack_message_object(message_obj):
    """Build a flat metadata dict (user/channel/team names) from a slack message.

    Notes:
        `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]

    Args:
        message_obj (:obj:`slackbot.message`): response object for slack

    Returns:
        dict: message data

    """
    info = dict(message_obj._body)
    client = message_obj._client
    try:
        info['channel_name'] = client.channels[info['channel']]['name']
    except KeyError:
        # channel id not in the channel list -> treat as a direct message
        info['channel_name'] = 'DIRECT_MESSAGE:{}'.format(
            client.users[info['user']]['name'])
    info['user_name'] = client.users[info['user']]['name']
    info['team_name'] = client.login_data['team']['name']
    return info
def parse_discord_context_object(context_obj):
    """Build a standardized metadata dict from a discord command context.

    Args:
        context_obj (:obj:`discord.context`): response object for discord

    Returns:
        dict: standardized message data

    """
    message = context_obj.message
    result = {}
    result['user_name'] = message.author.name
    result['team_name'] = message.server.name
    try:
        result['channel_name'] = message.channel.name
    except Exception:
        # direct messages have no named channel
        result['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message.author.name)
    return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from os import path
import pprint
HERE = path.abspath(path.dirname(__file__))  # absolute directory of this module
PP = pprint.PrettyPrinter(indent=2)  # shared pretty-printer for debug dumps
def parse_slack_message_object(message_obj):
    """Build a flat metadata dict (user/channel/team names) from a slack message.

    Notes:
        `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]

    Args:
        message_obj (:obj:`slackbot.message`): response object for slack

    Returns:
        dict: message data

    """
    info = dict(message_obj._body)
    client = message_obj._client
    try:
        info['channel_name'] = client.channels[info['channel']]['name']
    except KeyError:
        # channel id not in the channel list -> treat as a direct message
        info['channel_name'] = 'DIRECT_MESSAGE:{}'.format(
            client.users[info['user']]['name'])
    info['user_name'] = client.users[info['user']]['name']
    info['team_name'] = client.login_data['team']['name']
    return info
def parse_discord_context_object(context_obj):
    """Build a standardized metadata dict from a discord command context.

    Args:
        context_obj (:obj:`discord.context`): response object for discord

    Returns:
        dict: standardized message data

    """
    message = context_obj.message
    result = {}
    result['user_name'] = message.author.name
    result['team_name'] = message.server.name
    try:
        result['channel_name'] = message.channel.name
    except Exception:
        # direct messages have no named channel
        result['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message.author.name)
    return result
<|reserved_special_token_1|>
"""slack_utils.py: slack-specific utilities"""
from os import path
import pprint
HERE = path.abspath(path.dirname(__file__))  # absolute directory of this module
PP = pprint.PrettyPrinter(indent=2)  # shared pretty-printer for debug dumps
def parse_slack_message_object(message_obj):
    """Build a flat metadata dict (user/channel/team names) from a slack message.

    Notes:
        `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]

    Args:
        message_obj (:obj:`slackbot.message`): response object for slack

    Returns:
        dict: message data

    """
    info = dict(message_obj._body)
    client = message_obj._client
    try:
        info['channel_name'] = client.channels[info['channel']]['name']
    except KeyError:
        # channel id not in the channel list -> treat as a direct message
        info['channel_name'] = 'DIRECT_MESSAGE:{}'.format(
            client.users[info['user']]['name'])
    info['user_name'] = client.users[info['user']]['name']
    info['team_name'] = client.login_data['team']['name']
    return info
def parse_discord_context_object(context_obj):
    """Build a standardized metadata dict from a discord command context.

    Args:
        context_obj (:obj:`discord.context`): response object for discord

    Returns:
        dict: standardized message data

    """
    message = context_obj.message
    result = {}
    result['user_name'] = message.author.name
    result['team_name'] = message.server.name
    try:
        result['channel_name'] = message.channel.name
    except Exception:
        # direct messages have no named channel
        result['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message.author.name)
    return result
|
flexible
|
{
"blob_id": "2df2cccc22aba2104ab15820e13d304addf83f63",
"index": 7163,
"step-1": "<mask token>\n\n\ndef parse_slack_message_object(message_obj):\n \"\"\"parse user_name/channel_name out of slack controller\n\n Notes:\n `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]\n\n Args:\n message_obj (:obj:`slackbot.message`): response object for slack\n\n Returns:\n dict: message data\n\n \"\"\"\n metadata = dict(message_obj._body)\n try:\n metadata['channel_name'] = message_obj._client.channels[metadata[\n 'channel']]['name']\n except KeyError:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message_obj.\n _client.users[metadata['user']]['name'])\n metadata['user_name'] = message_obj._client.users[metadata['user']]['name']\n metadata['team_name'] = message_obj._client.login_data['team']['name']\n return metadata\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_slack_message_object(message_obj):\n \"\"\"parse user_name/channel_name out of slack controller\n\n Notes:\n `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]\n\n Args:\n message_obj (:obj:`slackbot.message`): response object for slack\n\n Returns:\n dict: message data\n\n \"\"\"\n metadata = dict(message_obj._body)\n try:\n metadata['channel_name'] = message_obj._client.channels[metadata[\n 'channel']]['name']\n except KeyError:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message_obj.\n _client.users[metadata['user']]['name'])\n metadata['user_name'] = message_obj._client.users[metadata['user']]['name']\n metadata['team_name'] = message_obj._client.login_data['team']['name']\n return metadata\n\n\ndef parse_discord_context_object(context_obj):\n \"\"\"parse user_name/channel_name out of discord controller\n\n Args:\n context_obj (:obj:`discord.context`): response object for discord\n\n Returns:\n dict: standardized message data\n\n \"\"\"\n metadata = dict()\n metadata['user_name'] = context_obj.message.author.name\n metadata['team_name'] = context_obj.message.server.name\n try:\n metadata['channel_name'] = context_obj.message.channel.name\n except Exception:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(context_obj.\n message.author.name)\n return metadata\n",
"step-3": "<mask token>\nHERE = path.abspath(path.dirname(__file__))\nPP = pprint.PrettyPrinter(indent=2)\n\n\ndef parse_slack_message_object(message_obj):\n \"\"\"parse user_name/channel_name out of slack controller\n\n Notes:\n `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]\n\n Args:\n message_obj (:obj:`slackbot.message`): response object for slack\n\n Returns:\n dict: message data\n\n \"\"\"\n metadata = dict(message_obj._body)\n try:\n metadata['channel_name'] = message_obj._client.channels[metadata[\n 'channel']]['name']\n except KeyError:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message_obj.\n _client.users[metadata['user']]['name'])\n metadata['user_name'] = message_obj._client.users[metadata['user']]['name']\n metadata['team_name'] = message_obj._client.login_data['team']['name']\n return metadata\n\n\ndef parse_discord_context_object(context_obj):\n \"\"\"parse user_name/channel_name out of discord controller\n\n Args:\n context_obj (:obj:`discord.context`): response object for discord\n\n Returns:\n dict: standardized message data\n\n \"\"\"\n metadata = dict()\n metadata['user_name'] = context_obj.message.author.name\n metadata['team_name'] = context_obj.message.server.name\n try:\n metadata['channel_name'] = context_obj.message.channel.name\n except Exception:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(context_obj.\n message.author.name)\n return metadata\n",
"step-4": "<mask token>\nfrom os import path\nimport pprint\nHERE = path.abspath(path.dirname(__file__))\nPP = pprint.PrettyPrinter(indent=2)\n\n\ndef parse_slack_message_object(message_obj):\n \"\"\"parse user_name/channel_name out of slack controller\n\n Notes:\n `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]\n\n Args:\n message_obj (:obj:`slackbot.message`): response object for slack\n\n Returns:\n dict: message data\n\n \"\"\"\n metadata = dict(message_obj._body)\n try:\n metadata['channel_name'] = message_obj._client.channels[metadata[\n 'channel']]['name']\n except KeyError:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(message_obj.\n _client.users[metadata['user']]['name'])\n metadata['user_name'] = message_obj._client.users[metadata['user']]['name']\n metadata['team_name'] = message_obj._client.login_data['team']['name']\n return metadata\n\n\ndef parse_discord_context_object(context_obj):\n \"\"\"parse user_name/channel_name out of discord controller\n\n Args:\n context_obj (:obj:`discord.context`): response object for discord\n\n Returns:\n dict: standardized message data\n\n \"\"\"\n metadata = dict()\n metadata['user_name'] = context_obj.message.author.name\n metadata['team_name'] = context_obj.message.server.name\n try:\n metadata['channel_name'] = context_obj.message.channel.name\n except Exception:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(context_obj.\n message.author.name)\n return metadata\n",
"step-5": "\"\"\"slack_utils.py: slack-specific utilities\"\"\"\nfrom os import path\nimport pprint\n\nHERE = path.abspath(path.dirname(__file__))\nPP = pprint.PrettyPrinter(indent=2)\n\ndef parse_slack_message_object(message_obj):\n \"\"\"parse user_name/channel_name out of slack controller\n\n Notes:\n `slackbot.message`.keys(): [type, channel, user, text, ts, source_team, team]\n\n Args:\n message_obj (:obj:`slackbot.message`): response object for slack\n\n Returns:\n dict: message data\n\n \"\"\"\n metadata = dict(message_obj._body)\n try:\n metadata['channel_name'] = message_obj._client.channels[metadata['channel']]['name']\n except KeyError:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(\n message_obj._client.users[metadata['user']]['name']\n )\n metadata['user_name'] = message_obj._client.users[metadata['user']]['name']\n metadata['team_name'] = message_obj._client.login_data['team']['name']\n\n return metadata\n\ndef parse_discord_context_object(context_obj):\n \"\"\"parse user_name/channel_name out of discord controller\n\n Args:\n context_obj (:obj:`discord.context`): response object for discord\n\n Returns:\n dict: standardized message data\n\n \"\"\"\n metadata = dict() # TODO: all context_obj.message.{children}.name values\n metadata['user_name'] = context_obj.message.author.name\n metadata['team_name'] = context_obj.message.server.name\n try:\n metadata['channel_name'] = context_obj.message.channel.name\n except Exception:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(context_obj.message.author.name)\n\n return metadata\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.animation as animation
import pylab
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
class Hexapod:
    def __init__(self, axis):
        """
        Initialize the system's starting parameters.

        :param axis: rotation axis of the system ('x', 'y' or 'z')
        """
        self.axis = axis  # rotation axis of the body
        self.alpha = 30.  # angle between actuators at the attachment point [deg]
        self.beta = 30.  # dihedral angle between the actuators and the platform [deg]
        self.L = 1.5  # actuator length
        self.h_c = 2.  # height of the body's centre of mass
        self.r = 1.  # body radius (NOTE(review): self.r is reassigned twice below — last assignment wins)
        self.m_p = 1000.  # platform mass
        self.m = 4000.  # body mass
        self.nu = 0.5  # oscillation frequency [Hz]
        # inertia tensor of the body, used for the inverse problem
        self.J = np.array([[5000, 0, 0],
                           [0, 5000, 0],
                           [0, 0, 3500]], np.float32)
        # initial positions of the actuator attachment points on the UPPER platform
        self.A_0 = np.round([[self.r*np.sin(2*np.pi/3*i + np.pi),
                              self.r*np.cos(2*np.pi/3*i + np.pi),
                              -self.h_c] for i in range(-1, 2)], 5)
        # actuator attachment points on the LOWER platform (const)
        self.B = np.array([])
        # positions of the attachment points on the UPPER platform over all time
        self.A = np.array([])
        # length of every actuator over all time
        self.all_full_lengths = np.array([])
        # moment arms of the actuator forces over all time
        self.r = np.array([])
        # rotation amplitude, angle law and its time derivatives about OX
        self.fi_x_0 = 4.  # degrees
        self.fi_x = lambda t: self.fi_x_0 * np.sin(2*np.pi*self.nu*t)
        self.prime_fi_x = lambda t: self.fi_x_0 * 2*np.pi*self.nu * np.cos(2*np.pi*self.nu*t)
        self.prime2_fi_x = lambda t: -self.fi_x_0 * (2*np.pi*self.nu)**2 * np.sin(2*np.pi*self.nu*t)
        # rotation amplitude, angle law and its time derivatives about OY and OZ
        self.fi_y_0 = 4.  # degrees
        self.fi_y = lambda t: self.fi_y_0 * np.sin(2*np.pi*self.nu*t)
        self.prime_fi_y = lambda t: self.fi_y_0 * 2*np.pi*self.nu * np.cos(2*np.pi*self.nu*t)
        self.prime2_fi_y = lambda t: -self.fi_y_0 * (2*np.pi*self.nu)**2 * np.sin(2*np.pi*self.nu*t)
        # rotation matrix about the OX axis
        self.R_matrix_x = lambda t: np.round([[np.cos(self.fi_x(t)*np.pi/180.), -np.sin(self.fi_x(t)*np.pi/180.), 0],
                                              [np.sin(self.fi_x(t)*np.pi/180.), np.cos(self.fi_x(t)*np.pi/180.), 0],
                                              [0, 0, 1]], 5)
        # rotation matrix about the OY axis
        self.R_matrix_y = lambda t: np.round([[1, 0, 0],
                                              [0, np.cos(self.fi_y(t)*np.pi/180.), -np.sin(self.fi_y(t)*np.pi/180.)],
                                              [0, np.sin(self.fi_y(t)*np.pi/180.), np.cos(self.fi_y(t)*np.pi/180.)]], 5)
        # rotation matrix about the OZ axis
        # NOTE(review): this matrix reuses fi_y — there is no separate fi_z law; confirm intended
        self.R_matrix_z = lambda t: np.round([[np.cos(self.fi_y(t)*np.pi/180.), 0, np.sin(self.fi_y(t)*np.pi/180.)],
                                              [0, 1, 0],
                                              [-np.sin(self.fi_y(t)*np.pi/180.), 0, np.cos(self.fi_y(t)*np.pi/180.)]], 5)
        # geometry helpers used to place the B points
        self.H = np.cos(np.pi/180. * self.beta) * np.cos(np.pi/180. * self.alpha/2) * self.L
        self.h = self.L * np.cos(np.pi/180.*self.alpha/2) * np.sin(np.pi/180.*self.beta)
        self.a = self.L * np.sin(np.pi/180.*self.alpha/2)  # base of the triangle
        self.r = (self.h**2 + self.a**2)**0.5
        # time span for evaluating the motion laws
        self.end_time = 2.0
        self.start_time = 0.
        self.steps = 100
        self.time = np.linspace(self.start_time, self.end_time, self.steps)
        # index pairs linking upper-platform (A) and lower-platform (B) points
        self.indexes = [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]]
    def set_B(self):
        """
        Compute the rig geometry — positions of the lower attachment points B_i.

        Each upper point A_i gets two lower points, obtained by rotating the
        in-plane offsets (h, ±a) by (30 - 120*i) degrees and dropping them by
        H + h_c below the platform.
        Sets: self.B (shape (6, 3))
        :return: None
        """
        for i, A in enumerate(self.A_0):
            a = A[:2]
            b1 = np.array([self.h, self.a])
            b2 = np.array([self.h, - self.a])
            # 2D rotation by (30 - 120*i) degrees
            kappa = np.array([[np.cos(np.pi / 180 * (30-120*i)), -np.sin(np.pi / 180 * (30-120*i))],
                              [np.sin(np.pi / 180 * (30-120*i)), np.cos(np.pi / 180 * (30-120*i))]])
            p1 = np.dot(kappa, b1) + a
            p2 = np.dot(kappa, b2) + a
            p1 = np.append(p1, - self.H - self.h_c)
            p2 = np.append(p2, - self.H - self.h_c)
            self.B = np.hstack((self.B, p1))
            self.B = np.hstack((self.B, p2))
        self.B = self.B.reshape(6, 3)
        # sanity-check actuator lengths: |A_i - B_j| must match self.L
        i = 0
        for A in self.A_0:
            assert np.linalg.norm(np.subtract(A, self.B[i])) - self.L <= 1e-4
            assert np.linalg.norm(np.subtract(A, self.B[i + 1])) - self.L <= 1e-4
            # print(np.linalg.norm(np.subtract(A, self.B[i])))
            # print(np.linalg.norm(np.subtract(A, self.B[i + 1])))
            i += 2
    def get_delta_L(self):
        """
        Compute the geometry of the A_i points at every time step and plot the
        length change, velocity and acceleration of each actuator over time.

        Sets: self.A, self.all_full_lengths; also calls self.set_r() and
        self.plot_3d_lines() at the end.
        :return: None
        """
        print('####################################################')
        print('[INFO] solve delta L, Velocity, Acceleration ...')
        print('####################################################')
        # rotation matrix for the requested axis
        R_matrix = None
        if self.axis == 'x':
            R_matrix = self.R_matrix_x
        elif self.axis == 'y':
            R_matrix = self.R_matrix_y
        elif self.axis == 'z':
            R_matrix = self.R_matrix_z
        # elongation of every cylinder over the whole time span
        dL_all = []
        # full lengths of all cylinders over time
        L_all = []
        # coordinates of the attachment points on the UPPER platform
        coordinates_A = []
        # plot style per lower-platform actuator index
        colors = {0: 'r+--', 1: 'rx-',
                  2: 'g+--', 3: 'gx-',
                  4: 'b+--', 5: 'bx-'}
        for i, j in self.indexes:
            print('[INFO] Поршень №{}'.format(j+1))
            dl = []  # piston length change at time t
            l = []  # piston length at time t
            coord = []  # coordinate of point A_i at time t
            for t in self.time:
                try:
                    A = np.dot(R_matrix(t), self.A_0[i])
                except Exception:
                    # NOTE(review): if self.axis is not 'x'/'y'/'z', R_matrix is
                    # None and A stays unbound on the first iteration — confirm
                    print('Type error axis')
                # current actuator length
                L = np.linalg.norm(self.B[j] - A)
                print(self.B[j] - A)
                print(self.L, L)
                # L = np.sum((A - self.B[j])**2)**0.5
                print('dL[мм] = {:.5f}'.format((L - self.L) * 1e3))
                l.append(L)
                dl.append(round(((L - self.L) * 1e3), 5))
                coord.append(A)
            dL_all.append(dl)
            L_all.append(l)
            coordinates_A.append(coord)
            # numerically differentiate to get the VELOCITY of the length change
            v = [0.0]
            for k in range(self.steps - 1):
                v.append((dl[k+1] - dl[k]) / (self.time[k+1] - self.time[k]))
            pylab.figure(1)
            pylab.plot(self.time[5:], v[5:], colors[j])
            print('[INFO] v_max =', np.max(np.abs(v[5:])))
            # numerically differentiate again to get the ACCELERATION
            a = [0.0]
            for k in range(self.steps - 1):
                a.append((v[k + 1] - v[k]) / (self.time[k + 1] - self.time[k]))
            pylab.figure(2)
            pylab.plot(self.time[5:], a[5:], colors[j])
            print('[INFO] a_max =', np.max(np.abs(a[5:])))
            print('****************************************************')
        # legend/labels for the velocity figure
        pylab.figure(1)
        pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)
        pylab.title('Velocity')
        pylab.xlabel('Time [s]')
        pylab.ylabel('Velocity [mm/s]')
        pylab.grid()
        # plt.savefig("output/velocity_{}.png".format(self.axis))
        # legend/labels for the acceleration figure
        pylab.figure(2)
        pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)
        pylab.title('Acceleration')
        pylab.xlabel('Time [s]')
        pylab.ylabel('Acceleration [mm/s^2]')
        pylab.grid()
        # pylab.savefig("output/acceleration_{}.png".format(self.axis))
        # elongation plot for every piston
        pylab.figure(3)
        for i in range(6):
            pylab.plot(self.time, dL_all[i], colors[i])
        pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)
        pylab.title('Delta length')
        pylab.xlabel('Time [s]')
        pylab.ylabel('dL [mm]')
        pylab.grid()
        # pylab.savefig("output/length_{}.png".format(self.axis))
        plt.show()
        # drop duplicated vertices (two actuators share each upper point A_i)
        self.A = np.array(coordinates_A[0::2])
        self.all_full_lengths = np.array(L_all)
        self.set_r()
        # frame-by-frame rendering of the rig geometry
        self.plot_3d_lines()
        # self.plot_animate(coordinates_A)
def plot_3d_lines(self):
    """
    Frame-by-frame rendering of the test-rig geometry in 3D.

    Draws on a single 3D axes:
      * scatter markers at the initial anchor pairs (also seeds the legend),
      * the cylinder axis for selected frames (every ``self.steps - 1``-th step),
      * the displaced upper-platform outline at every 9th time step,
      * the initial outlines of the upper (``A_0``) and lower (``B``) platforms.

    Reads ``self.indexes``, ``self.A_0``, ``self.A``, ``self.B``, ``self.r``,
    ``self.steps``.
    :return: None (shows the figure)
    """
    pylab.figure(figsize=(12, 10))
    ax = pylab.axes(projection='3d')
    # one color/marker per lower-platform anchor index j
    colors = {0: 'r', 1: 'orange',
              2: 'g', 3: 'olive',
              4: 'b', 5: 'navy'}
    markers = {0: '^', 1: '^',
               2: 'o', 3: 'o',
               4: '*', 5: '*'}
    # seed the legend: one scatter entry per cylinder at its initial position
    for i, j in self.indexes:
        df_A = pd.Series(data=self.A_0[i], index=['x', 'y', 'z'])
        df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])
        x = [df_A.x, df_B.x]
        y = [df_A.y, df_B.y]
        z = [df_A.z, df_B.z]
        ax.scatter(x, y, z, c=colors[j], marker=markers[j], s=20.)
    ax.legend([r'1', '2', '3', '4', '5', '6'], loc=0)
    # indexes = [[0, 0], [1, 2], [2, 4]]
    # plot the displacement of each piston over time
    for i, j in self.indexes:
        k = 0
        for (a, r) in zip(self.A[i], self.r[j]):
            df_A = pd.Series(data=a, index=['x', 'y', 'z'])
            df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])
            df_r = pd.Series(data=r, index=['x', 'y', 'z'])
            # cylinder axis segment: upper anchor -> lower anchor
            x = [df_A.x, df_B.x]
            y = [df_A.y, df_B.y]
            z = [df_A.z, df_B.z]
            # force lever-arm segment: arm tip -> origin
            x1 = [df_r.x, 0]
            y1 = [df_r.y, 0]
            z1 = [df_r.z, 0]
            # extension of the cylinder axis: arm tip -> lower anchor
            x2 = [df_r.x, df_B.x]
            y2 = [df_r.y, df_B.y]
            z2 = [df_r.z, df_B.z]
            # partial storyboard: draw only every (steps-1)-th frame
            if k % int(self.steps-1) == 0:
                # if k:
                # ax.plot(x1, y1, z1, c=colors[j], marker=markers[j])
                # ax.plot(x2, y2, z2, c='gray', marker='+')
                ax.plot(x, y, z, c=colors[j], marker=markers[j])
                # print('H_A =', z[0])
            k += 1
    # plot the displaced upper-platform outline
    for i in range(0, self.steps, 9):
        a = np.array([self.A[0, i], self.A[1, i], self.A[2, i]])
        df_A = pd.DataFrame(data=a, columns=['x', 'y', 'z'])
        # re-append the first vertex to close the outline
        df_A = pd.concat((df_A, df_A.take([0])), axis=0)
        ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='gray')
    # draw the initial outlines of the upper and lower platforms
    df_B = pd.DataFrame(data=self.B, columns=['x', 'y', 'z'])
    df_B = pd.concat((df_B, df_B.take([0])))
    df_A = pd.DataFrame(data=self.A_0, columns=['x', 'y', 'z'])
    df_A = pd.concat((df_A, df_A.take([0])), axis=0)
    ax.plot(df_B.x.values, df_B.y.values, df_B.z.values, c='black', linewidth=4.)
    ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='black', linewidth=4.)
    ax.view_init(30, -39)
    # pylab.savefig("output/plot_3d_{}.png".format(self.axis))
    plt.show()
def calculate_angles(self, l1, l2):
"""
Решение теоремы косинусов для поиска угла по трем сторонам
:param l1: прилежащая сторона к вычисляемому углу
:param l2: противолежащая сторона к углу
:return: (alpha, teta, gamma) - углы в треугольнике сил
"""
cos_teta = (l1**2 + (2*self.a)**2 - l2**2) / 2*l1*2*self.a
teta = np.arccos(cos_teta) * 180. / np.pi
b = l1**2 + self.a**2 - 2*l1*self.a*cos_teta
cos_alpha = (l1**2 + b**2 - l2**2) / 2*l1*self.a
alpha = np.arccos(cos_alpha) * 180. / np.pi
gamma = 180. - teta - alpha
return alpha, teta, gamma
def set_r(self):
"""
Вычисление радиус-векторов плеч сил для каждого цилиндра
:return: None
"""
r_all = []
for i, j in self.indexes:
r = []
for a in self.A[i]:
L = np.array(a - self.B[j])
direct_L = L / np.linalg.norm(L)
t1 = np.array([0, -direct_L[2], direct_L[1]])
b1 = np.array([a[2]*direct_L[1] - a[1]*direct_L[2]])
t2 = direct_L
b2 = np.array([0])
t3 = np.array([a[1]*direct_L[2] - a[2]*direct_L[1],
-a[0]*direct_L[2] + a[2]*direct_L[0],
a[0]*direct_L[1] - a[1]*direct_L[0]])
b3 = np.array([0])
T = np.stack((t1, t2, t3))
b = np.stack((b1, b2, b3))
r.append(np.linalg.solve(T, b).reshape((3,)))
r_all.append(r)
self.r = np.array(r_all)
def solve_dynamic_forces(self):
"""
решение обратной задачи стенда
Первое приближение - решение двумерной зазадчи для пооврота оси вокруг оси х
:return: минимальная и максимальная нагрузка на каждый цилиндр
"""
print('####################################################')
print('[INFO] solve DYNAMIC forces ...')
print('####################################################')
A = []
for i in range(self.steps):
a = []
for j in range(3):
a_ = self.A[j, i, :]
a.append(a_)
A.append(a)
R = []
for i in range(self.steps):
r = []
for j in range(6):
r_ = self.r[j, i, :]
r.append(r_)
R.append(r)
A = np.array(A)
R = np.array(R)
forces = []
for a, r, t in zip(A, R, self.time):
L = []
direct = [] # направления сил
shoulder = [] # плечи сил
for i, j in self.indexes:
len = np.array(self.B[j] - a[i])
dir = len / np.linalg.norm(len)
L.append(len)
direct_force_try = self.B[j] - r[j]
# direct.append(dir)
direct.append(direct_force_try)
# direct.append(direct_force_try)
shoulder.append(np.cross(r[j], direct_force_try))
L = np.array(L)
T_static = np.array(direct).T
T_dynamics = np.array(shoulder).T
b_static = np.array([-self.m*9.8, 0, 0]).reshape((3, 1))
# определение направления действующих сил
dynamic_comp = None
if self.axis == 'x':
comp = self.J[2, 2] * self.prime2_fi_x(t)
dynamic_comp = np.array([comp, 0, 0]).reshape((3, 1))
elif self.axis == 'y':
comp = self.J[1, 1] * self.prime2_fi_y(t)
dynamic_comp = np.array([0, comp, 0]).reshape((3, 1))
elif self.axis == 'z':
comp = self.J[0, 0] * self.prime2_fi_y(t)
dynamic_comp = np.array([0, 0, comp]).reshape((3, 1))
b_dynamic = dynamic_comp
# T = np.vstack((T_static, T_dynamics))
# b = np.vstack((b_static, b_dynamic))
T = T_dynamics[:, :3]
b = b_dynamic[:, :3]
# print(T)
# print(b)
dynamic_f = np.linalg.solve(T, b).reshape((3,))
forces.append(dynamic_f)
print('[INFO] time:', t)
print('[INFO] length:', [round(np.linalg.norm(l), 4) for l in L])
print('[INFO] shoulders:', [round(np.linalg.norm(l), 4) for l in np.array(shoulder)])
print('[INFO] forces:', [round(f, 4) for f in dynamic_f])
print('[INFO] dynamic component:', b_dynamic.T)
print('****************************************************')
forces = np.array(forces).T
# график приложенной силы к цилиндрам от времени
colors = {0: 'r+--', 1: 'rx-',
2: 'g+--', 3: 'gx-',
4: 'b+--', 5: 'bx-'}
for i, j in self.indexes:
pylab.plot(self.time, forces[i], colors[j])
# colors = {0: 'r+--', 1: 'g+--', 2: 'b+--'}
# for i in range(3):
# pylab.plot(self.time, forces[i], colors[i], label='$F_{}$'.format(i))
pylab.legend([r'$F_1$', '$F_2$', '$F_3$', '$F_4$', '$F_5$', '$F_6$'], loc=0)
# plt.legend(loc="lower right")
pylab.title('Dynamic forces')
pylab.xlabel('Time [s]')
pylab.ylabel('Force [kg*m/s^2]')
pylab.grid()
plt.show()
def solve_static_forces(self):
    """
    Solve the inverse problem of the rig for static loads.

    For each time step a 3x3 system of the three representative cylinder
    directions is solved against half the platform weight (the other
    half is carried by the symmetric cylinders).

    Reads ``self.A``, ``self.B``, ``self.steps``, ``self.time``, ``self.m``.
    :return: None (prints per-step diagnostics and plots the forces)
    """
    print('####################################################')
    print('[INFO] solve STATIC forces ...')
    print('####################################################')
    # (upper anchor index, force/color index) pairs exploiting the
    # rig's x-symmetry: only three distinct forces remain
    x_symmetry_ind = [[0, 0], [0, 1], [1, 2]]
    forces = []
    for a, t in zip(self.A.reshape((self.steps, 3, 3)), self.time):
        # axis vectors of the three representative cylinders
        L1 = np.array(a[0] - self.B[0])
        L2 = np.array(a[0] - self.B[1])
        L3 = np.array(a[1] - self.B[2])
        direct_L1 = L1 / np.linalg.norm(L1)
        direct_L2 = L2 / np.linalg.norm(L2)
        direct_L3 = L3 / np.linalg.norm(L3)
        T = np.stack((direct_L1, direct_L2, direct_L3))
        # half the weight: the symmetric half of the rig carries the rest
        b = np.array([-self.m*9.8/2, 0, 0]).reshape((3, 1))
        static_f = np.linalg.solve(T, b).reshape((3,)) / 2
        forces.append(static_f)
        print('[INFO] time:', t)
        print('[INFO] length:',
              round(np.linalg.norm(L1), 4),
              round(np.linalg.norm(L2), 4),
              round(np.linalg.norm(L3), 4))
        # NOTE(review): static_f is already halved above, and the printed
        # values are halved again here — confirm which scaling is intended.
        print('[INFO] forces:',
              round(static_f[0]/2, 4),
              round(static_f[1]/2, 4),
              round(static_f[2]/2, 4))
        print('****************************************************')
    forces = np.array(forces).T
    # plot the applied cylinder force against time
    colors = {0: 'r+--', 1: 'g+--', 2: 'b+--'}
    # FIX: the anchor index of each pair was bound to `i` but unused
    for _, j in x_symmetry_ind:
        pylab.plot(self.time, forces[j], colors[j])
    pylab.legend([r'$F_1$', '$F_2$', '$F_3$'], loc=0)
    pylab.title('Static forces')
    pylab.xlabel('Time [s]')
    pylab.ylabel('Force [kg*m/s^2]')
    pylab.grid()
    plt.show()
def plot_animate(self, A):
    """
    Animate the mechanism geometry frame by frame (experimental).

    :param A: sequence of per-step upper anchor point sets; only A[0]
        is drawn by the current implementation
    :return: None (shows the animation window)
    """
    # FIX: the original docstring opened with four quotes (`""""`), and
    # the helpers used module-level `global` state (cnt/cur_A/cur_B);
    # plain closures over locals are equivalent and side-effect free.
    fig = plt.figure()
    fig.set_tight_layout(False)
    ax = plt.axes(projection='3d')
    cur_A = A[0]
    cur_B = self.B[0]

    def steps(count=1):
        # draw `count` cylinder segments of the current frame
        for i in range(count):
            df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])
            df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])
            x = [df_A.x, df_B.x]
            y = [df_A.y, df_B.y]
            z = [df_A.z, df_B.z]
            ax.plot(x, y, z)

    def animate(frame):
        steps(1)
        return ax

    # keep the reference alive: FuncAnimation stops if garbage-collected
    anim = animation.FuncAnimation(fig, animate, frames=100)
    plt.show()
if __name__ == "__main__":
    # Renamed from `hex`, which shadowed the builtin hex() function.
    hexapod = Hexapod(axis='y')
    hexapod.set_B()
    hexapod.get_delta_L()
    hexapod.solve_static_forces()
    hexapod.solve_dynamic_forces()
|
normal
|
{
"blob_id": "9a672c17ee22a05e77491bc1449c1c1678414a8c",
"index": 3094,
"step-1": "<mask token>\n\n\nclass Hexapod:\n <mask token>\n <mask token>\n\n def get_delta_L(self):\n \"\"\"\n Расчет геометрии положения точек A_i в каждый момент времени.\n Отрисовка графиков изменения длин, скорости и ускорения для каждого привода по времени.\n\n Задается: self.A, self.all_full_lengths, self.set_r\n :return: None\n \"\"\"\n print('####################################################')\n print('[INFO] solve delta L, Velocity, Acceleration ...')\n print('####################################################')\n R_matrix = None\n if self.axis == 'x':\n R_matrix = self.R_matrix_x\n elif self.axis == 'y':\n R_matrix = self.R_matrix_y\n elif self.axis == 'z':\n R_matrix = self.R_matrix_z\n dL_all = []\n L_all = []\n coordinates_A = []\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n print('[INFO] Поршень №{}'.format(j + 1))\n dl = []\n l = []\n coord = []\n for t in self.time:\n try:\n A = np.dot(R_matrix(t), self.A_0[i])\n except Exception:\n print('Type error axis')\n L = np.linalg.norm(self.B[j] - A)\n print(self.B[j] - A)\n print(self.L, L)\n print('dL[мм] = {:.5f}'.format((L - self.L) * 1000.0))\n l.append(L)\n dl.append(round((L - self.L) * 1000.0, 5))\n coord.append(A)\n dL_all.append(dl)\n L_all.append(l)\n coordinates_A.append(coord)\n v = [0.0]\n for k in range(self.steps - 1):\n v.append((dl[k + 1] - dl[k]) / (self.time[k + 1] - self.\n time[k]))\n pylab.figure(1)\n pylab.plot(self.time[5:], v[5:], colors[j])\n print('[INFO] v_max =', np.max(np.abs(v[5:])))\n a = [0.0]\n for k in range(self.steps - 1):\n a.append((v[k + 1] - v[k]) / (self.time[k + 1] - self.time[k]))\n pylab.figure(2)\n pylab.plot(self.time[5:], a[5:], colors[j])\n print('[INFO] a_max =', np.max(np.abs(a[5:])))\n print('****************************************************')\n pylab.figure(1)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Velocity')\n 
pylab.xlabel('Time [s]')\n pylab.ylabel('Velocity [mm/s]')\n pylab.grid()\n pylab.figure(2)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Acceleration')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Acceleration [mm/s^2]')\n pylab.grid()\n pylab.figure(3)\n for i in range(6):\n pylab.plot(self.time, dL_all[i], colors[i])\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Delta length')\n pylab.xlabel('Time [s]')\n pylab.ylabel('dL [mm]')\n pylab.grid()\n plt.show()\n self.A = np.array(coordinates_A[0::2])\n self.all_full_lengths = np.array(L_all)\n self.set_r()\n self.plot_3d_lines()\n <mask token>\n\n def calculate_angles(self, l1, l2):\n \"\"\"\n Решение теоремы косинусов для поиска угла по трем сторонам\n :param l1: прилежащая сторона к вычисляемому углу\n :param l2: противолежащая сторона к углу\n :return: (alpha, teta, gamma) - углы в треугольнике сил\n \"\"\"\n cos_teta = (l1 ** 2 + (2 * self.a) ** 2 - l2 ** 2\n ) / 2 * l1 * 2 * self.a\n teta = np.arccos(cos_teta) * 180.0 / np.pi\n b = l1 ** 2 + self.a ** 2 - 2 * l1 * self.a * cos_teta\n cos_alpha = (l1 ** 2 + b ** 2 - l2 ** 2) / 2 * l1 * self.a\n alpha = np.arccos(cos_alpha) * 180.0 / np.pi\n gamma = 180.0 - teta - alpha\n return alpha, teta, gamma\n <mask token>\n\n def solve_dynamic_forces(self):\n \"\"\"\n решение обратной задачи стенда\n Первое приближение - решение двумерной зазадчи для пооврота оси вокруг оси х\n :return: минимальная и максимальная нагрузка на каждый цилиндр\n \"\"\"\n print('####################################################')\n print('[INFO] solve DYNAMIC forces ...')\n print('####################################################')\n A = []\n for i in range(self.steps):\n a = []\n for j in range(3):\n a_ = self.A[j, i, :]\n a.append(a_)\n A.append(a)\n R = []\n for i in range(self.steps):\n r = []\n for j in range(6):\n r_ = self.r[j, i, :]\n r.append(r_)\n R.append(r)\n A = 
np.array(A)\n R = np.array(R)\n forces = []\n for a, r, t in zip(A, R, self.time):\n L = []\n direct = []\n shoulder = []\n for i, j in self.indexes:\n len = np.array(self.B[j] - a[i])\n dir = len / np.linalg.norm(len)\n L.append(len)\n direct_force_try = self.B[j] - r[j]\n direct.append(direct_force_try)\n shoulder.append(np.cross(r[j], direct_force_try))\n L = np.array(L)\n T_static = np.array(direct).T\n T_dynamics = np.array(shoulder).T\n b_static = np.array([-self.m * 9.8, 0, 0]).reshape((3, 1))\n dynamic_comp = None\n if self.axis == 'x':\n comp = self.J[2, 2] * self.prime2_fi_x(t)\n dynamic_comp = np.array([comp, 0, 0]).reshape((3, 1))\n elif self.axis == 'y':\n comp = self.J[1, 1] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, comp, 0]).reshape((3, 1))\n elif self.axis == 'z':\n comp = self.J[0, 0] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, 0, comp]).reshape((3, 1))\n b_dynamic = dynamic_comp\n T = T_dynamics[:, :3]\n b = b_dynamic[:, :3]\n dynamic_f = np.linalg.solve(T, b).reshape((3,))\n forces.append(dynamic_f)\n print('[INFO] time:', t)\n print('[INFO] length:', [round(np.linalg.norm(l), 4) for l in L])\n print('[INFO] shoulders:', [round(np.linalg.norm(l), 4) for l in\n np.array(shoulder)])\n print('[INFO] forces:', [round(f, 4) for f in dynamic_f])\n print('[INFO] dynamic component:', b_dynamic.T)\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n pylab.plot(self.time, forces[i], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$', '$F_4$', '$F_5$', '$F_6$'],\n loc=0)\n pylab.title('Dynamic forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def solve_static_forces(self):\n \"\"\"\n Решение обратной задачи стедна для статических нагрузок\n :return: компоненты силы для каждой опоры\n \"\"\"\n 
print('####################################################')\n print('[INFO] solve STATIC forces ...')\n print('####################################################')\n x_symmetry_ind = [[0, 0], [0, 1], [1, 2]]\n forces = []\n for a, t in zip(self.A.reshape((self.steps, 3, 3)), self.time):\n L1 = np.array(a[0] - self.B[0])\n L2 = np.array(a[0] - self.B[1])\n L3 = np.array(a[1] - self.B[2])\n direct_L1 = L1 / np.linalg.norm(L1)\n direct_L2 = L2 / np.linalg.norm(L2)\n direct_L3 = L3 / np.linalg.norm(L3)\n T = np.stack((direct_L1, direct_L2, direct_L3))\n b = np.array([-self.m * 9.8 / 2, 0, 0]).reshape((3, 1))\n static_f = np.linalg.solve(T, b).reshape((3,)) / 2\n forces.append(static_f)\n print('[INFO] time:', t)\n print('[INFO] length:', round(np.linalg.norm(L1), 4), round(np.\n linalg.norm(L2), 4), round(np.linalg.norm(L3), 4))\n print('[INFO] forces:', round(static_f[0] / 2, 4), round(\n static_f[1] / 2, 4), round(static_f[2] / 2, 4))\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'g+--', (2): 'b+--'}\n for i, j in x_symmetry_ind:\n pylab.plot(self.time, forces[j], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$'], loc=0)\n pylab.title('Static forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def plot_animate(self, A):\n \"\"\"\"\n try to create animate function to plot mechanisms\n \"\"\"\n fig = plt.figure()\n fig.set_tight_layout(False)\n ax = plt.axes(projection='3d')\n global cnt\n cnt = ax\n global cur_A\n global cur_B\n cur_A = A[0]\n cur_B = self.B[0]\n\n def steps(count=1):\n for i in range(count):\n df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n cnt.plot(x, y, z)\n\n def animate(frame):\n steps(1)\n return cnt\n anim = animation.FuncAnimation(fig, animate, frames=100)\n plt.show()\n\n\n<mask 
token>\n",
"step-2": "<mask token>\n\n\nclass Hexapod:\n\n def __init__(self, axis):\n \"\"\"\n Инициализация начальных параметров системы\n :param axis: ось вращения системы (x, y, z)\n \"\"\"\n self.axis = axis\n self.alpha = 30.0\n self.beta = 30.0\n self.L = 1.5\n self.h_c = 2.0\n self.r = 1.0\n self.m_p = 1000.0\n self.m = 4000.0\n self.nu = 0.5\n self.J = np.array([[5000, 0, 0], [0, 5000, 0], [0, 0, 3500]], np.\n float32)\n self.A_0 = np.round([[self.r * np.sin(2 * np.pi / 3 * i + np.pi), \n self.r * np.cos(2 * np.pi / 3 * i + np.pi), -self.h_c] for i in\n range(-1, 2)], 5)\n self.B = np.array([])\n self.A = np.array([])\n self.all_full_lengths = np.array([])\n self.r = np.array([])\n self.fi_x_0 = 4.0\n self.fi_x = lambda t: self.fi_x_0 * np.sin(2 * np.pi * self.nu * t)\n self.prime_fi_x = lambda t: self.fi_x_0 * 2 * np.pi * self.nu * np.cos(\n 2 * np.pi * self.nu * t)\n self.prime2_fi_x = lambda t: -self.fi_x_0 * (2 * np.pi * self.nu\n ) ** 2 * np.sin(2 * np.pi * self.nu * t)\n self.fi_y_0 = 4.0\n self.fi_y = lambda t: self.fi_y_0 * np.sin(2 * np.pi * self.nu * t)\n self.prime_fi_y = lambda t: self.fi_y_0 * 2 * np.pi * self.nu * np.cos(\n 2 * np.pi * self.nu * t)\n self.prime2_fi_y = lambda t: -self.fi_y_0 * (2 * np.pi * self.nu\n ) ** 2 * np.sin(2 * np.pi * self.nu * t)\n self.R_matrix_x = lambda t: np.round([[np.cos(self.fi_x(t) * np.pi /\n 180.0), -np.sin(self.fi_x(t) * np.pi / 180.0), 0], [np.sin(self\n .fi_x(t) * np.pi / 180.0), np.cos(self.fi_x(t) * np.pi / 180.0),\n 0], [0, 0, 1]], 5)\n self.R_matrix_y = lambda t: np.round([[1, 0, 0], [0, np.cos(self.\n fi_y(t) * np.pi / 180.0), -np.sin(self.fi_y(t) * np.pi / 180.0)\n ], [0, np.sin(self.fi_y(t) * np.pi / 180.0), np.cos(self.fi_y(t\n ) * np.pi / 180.0)]], 5)\n self.R_matrix_z = lambda t: np.round([[np.cos(self.fi_y(t) * np.pi /\n 180.0), 0, np.sin(self.fi_y(t) * np.pi / 180.0)], [0, 1, 0], [-\n np.sin(self.fi_y(t) * np.pi / 180.0), 0, np.cos(self.fi_y(t) *\n np.pi / 180.0)]], 5)\n self.H = np.cos(np.pi / 180.0 * 
self.beta) * np.cos(np.pi / 180.0 *\n self.alpha / 2) * self.L\n self.h = self.L * np.cos(np.pi / 180.0 * self.alpha / 2) * np.sin(\n np.pi / 180.0 * self.beta)\n self.a = self.L * np.sin(np.pi / 180.0 * self.alpha / 2)\n self.r = (self.h ** 2 + self.a ** 2) ** 0.5\n self.end_time = 2.0\n self.start_time = 0.0\n self.steps = 100\n self.time = np.linspace(self.start_time, self.end_time, self.steps)\n self.indexes = [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]]\n\n def set_B(self):\n \"\"\"\n Расчет геометрии стенда - положение точек B_i.\n\n Задается: self.B\n :return: None\n \"\"\"\n for i, A in enumerate(self.A_0):\n a = A[:2]\n b1 = np.array([self.h, self.a])\n b2 = np.array([self.h, -self.a])\n kappa = np.array([[np.cos(np.pi / 180 * (30 - 120 * i)), -np.\n sin(np.pi / 180 * (30 - 120 * i))], [np.sin(np.pi / 180 * (\n 30 - 120 * i)), np.cos(np.pi / 180 * (30 - 120 * i))]])\n p1 = np.dot(kappa, b1) + a\n p2 = np.dot(kappa, b2) + a\n p1 = np.append(p1, -self.H - self.h_c)\n p2 = np.append(p2, -self.H - self.h_c)\n self.B = np.hstack((self.B, p1))\n self.B = np.hstack((self.B, p2))\n self.B = self.B.reshape(6, 3)\n i = 0\n for A in self.A_0:\n assert np.linalg.norm(np.subtract(A, self.B[i])) - self.L <= 0.0001\n assert np.linalg.norm(np.subtract(A, self.B[i + 1])\n ) - self.L <= 0.0001\n i += 2\n\n def get_delta_L(self):\n \"\"\"\n Расчет геометрии положения точек A_i в каждый момент времени.\n Отрисовка графиков изменения длин, скорости и ускорения для каждого привода по времени.\n\n Задается: self.A, self.all_full_lengths, self.set_r\n :return: None\n \"\"\"\n print('####################################################')\n print('[INFO] solve delta L, Velocity, Acceleration ...')\n print('####################################################')\n R_matrix = None\n if self.axis == 'x':\n R_matrix = self.R_matrix_x\n elif self.axis == 'y':\n R_matrix = self.R_matrix_y\n elif self.axis == 'z':\n R_matrix = self.R_matrix_z\n dL_all = []\n L_all = []\n coordinates_A 
= []\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n print('[INFO] Поршень №{}'.format(j + 1))\n dl = []\n l = []\n coord = []\n for t in self.time:\n try:\n A = np.dot(R_matrix(t), self.A_0[i])\n except Exception:\n print('Type error axis')\n L = np.linalg.norm(self.B[j] - A)\n print(self.B[j] - A)\n print(self.L, L)\n print('dL[мм] = {:.5f}'.format((L - self.L) * 1000.0))\n l.append(L)\n dl.append(round((L - self.L) * 1000.0, 5))\n coord.append(A)\n dL_all.append(dl)\n L_all.append(l)\n coordinates_A.append(coord)\n v = [0.0]\n for k in range(self.steps - 1):\n v.append((dl[k + 1] - dl[k]) / (self.time[k + 1] - self.\n time[k]))\n pylab.figure(1)\n pylab.plot(self.time[5:], v[5:], colors[j])\n print('[INFO] v_max =', np.max(np.abs(v[5:])))\n a = [0.0]\n for k in range(self.steps - 1):\n a.append((v[k + 1] - v[k]) / (self.time[k + 1] - self.time[k]))\n pylab.figure(2)\n pylab.plot(self.time[5:], a[5:], colors[j])\n print('[INFO] a_max =', np.max(np.abs(a[5:])))\n print('****************************************************')\n pylab.figure(1)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Velocity')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Velocity [mm/s]')\n pylab.grid()\n pylab.figure(2)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Acceleration')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Acceleration [mm/s^2]')\n pylab.grid()\n pylab.figure(3)\n for i in range(6):\n pylab.plot(self.time, dL_all[i], colors[i])\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Delta length')\n pylab.xlabel('Time [s]')\n pylab.ylabel('dL [mm]')\n pylab.grid()\n plt.show()\n self.A = np.array(coordinates_A[0::2])\n self.all_full_lengths = np.array(L_all)\n self.set_r()\n self.plot_3d_lines()\n <mask token>\n\n def calculate_angles(self, l1, l2):\n 
\"\"\"\n Решение теоремы косинусов для поиска угла по трем сторонам\n :param l1: прилежащая сторона к вычисляемому углу\n :param l2: противолежащая сторона к углу\n :return: (alpha, teta, gamma) - углы в треугольнике сил\n \"\"\"\n cos_teta = (l1 ** 2 + (2 * self.a) ** 2 - l2 ** 2\n ) / 2 * l1 * 2 * self.a\n teta = np.arccos(cos_teta) * 180.0 / np.pi\n b = l1 ** 2 + self.a ** 2 - 2 * l1 * self.a * cos_teta\n cos_alpha = (l1 ** 2 + b ** 2 - l2 ** 2) / 2 * l1 * self.a\n alpha = np.arccos(cos_alpha) * 180.0 / np.pi\n gamma = 180.0 - teta - alpha\n return alpha, teta, gamma\n\n def set_r(self):\n \"\"\"\n Вычисление радиус-векторов плеч сил для каждого цилиндра\n :return: None\n \"\"\"\n r_all = []\n for i, j in self.indexes:\n r = []\n for a in self.A[i]:\n L = np.array(a - self.B[j])\n direct_L = L / np.linalg.norm(L)\n t1 = np.array([0, -direct_L[2], direct_L[1]])\n b1 = np.array([a[2] * direct_L[1] - a[1] * direct_L[2]])\n t2 = direct_L\n b2 = np.array([0])\n t3 = np.array([a[1] * direct_L[2] - a[2] * direct_L[1], -a[\n 0] * direct_L[2] + a[2] * direct_L[0], a[0] * direct_L[\n 1] - a[1] * direct_L[0]])\n b3 = np.array([0])\n T = np.stack((t1, t2, t3))\n b = np.stack((b1, b2, b3))\n r.append(np.linalg.solve(T, b).reshape((3,)))\n r_all.append(r)\n self.r = np.array(r_all)\n\n def solve_dynamic_forces(self):\n \"\"\"\n решение обратной задачи стенда\n Первое приближение - решение двумерной зазадчи для пооврота оси вокруг оси х\n :return: минимальная и максимальная нагрузка на каждый цилиндр\n \"\"\"\n print('####################################################')\n print('[INFO] solve DYNAMIC forces ...')\n print('####################################################')\n A = []\n for i in range(self.steps):\n a = []\n for j in range(3):\n a_ = self.A[j, i, :]\n a.append(a_)\n A.append(a)\n R = []\n for i in range(self.steps):\n r = []\n for j in range(6):\n r_ = self.r[j, i, :]\n r.append(r_)\n R.append(r)\n A = np.array(A)\n R = np.array(R)\n forces = []\n for a, r, t 
in zip(A, R, self.time):\n L = []\n direct = []\n shoulder = []\n for i, j in self.indexes:\n len = np.array(self.B[j] - a[i])\n dir = len / np.linalg.norm(len)\n L.append(len)\n direct_force_try = self.B[j] - r[j]\n direct.append(direct_force_try)\n shoulder.append(np.cross(r[j], direct_force_try))\n L = np.array(L)\n T_static = np.array(direct).T\n T_dynamics = np.array(shoulder).T\n b_static = np.array([-self.m * 9.8, 0, 0]).reshape((3, 1))\n dynamic_comp = None\n if self.axis == 'x':\n comp = self.J[2, 2] * self.prime2_fi_x(t)\n dynamic_comp = np.array([comp, 0, 0]).reshape((3, 1))\n elif self.axis == 'y':\n comp = self.J[1, 1] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, comp, 0]).reshape((3, 1))\n elif self.axis == 'z':\n comp = self.J[0, 0] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, 0, comp]).reshape((3, 1))\n b_dynamic = dynamic_comp\n T = T_dynamics[:, :3]\n b = b_dynamic[:, :3]\n dynamic_f = np.linalg.solve(T, b).reshape((3,))\n forces.append(dynamic_f)\n print('[INFO] time:', t)\n print('[INFO] length:', [round(np.linalg.norm(l), 4) for l in L])\n print('[INFO] shoulders:', [round(np.linalg.norm(l), 4) for l in\n np.array(shoulder)])\n print('[INFO] forces:', [round(f, 4) for f in dynamic_f])\n print('[INFO] dynamic component:', b_dynamic.T)\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n pylab.plot(self.time, forces[i], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$', '$F_4$', '$F_5$', '$F_6$'],\n loc=0)\n pylab.title('Dynamic forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def solve_static_forces(self):\n \"\"\"\n Решение обратной задачи стедна для статических нагрузок\n :return: компоненты силы для каждой опоры\n \"\"\"\n print('####################################################')\n print('[INFO] solve STATIC 
forces ...')\n print('####################################################')\n x_symmetry_ind = [[0, 0], [0, 1], [1, 2]]\n forces = []\n for a, t in zip(self.A.reshape((self.steps, 3, 3)), self.time):\n L1 = np.array(a[0] - self.B[0])\n L2 = np.array(a[0] - self.B[1])\n L3 = np.array(a[1] - self.B[2])\n direct_L1 = L1 / np.linalg.norm(L1)\n direct_L2 = L2 / np.linalg.norm(L2)\n direct_L3 = L3 / np.linalg.norm(L3)\n T = np.stack((direct_L1, direct_L2, direct_L3))\n b = np.array([-self.m * 9.8 / 2, 0, 0]).reshape((3, 1))\n static_f = np.linalg.solve(T, b).reshape((3,)) / 2\n forces.append(static_f)\n print('[INFO] time:', t)\n print('[INFO] length:', round(np.linalg.norm(L1), 4), round(np.\n linalg.norm(L2), 4), round(np.linalg.norm(L3), 4))\n print('[INFO] forces:', round(static_f[0] / 2, 4), round(\n static_f[1] / 2, 4), round(static_f[2] / 2, 4))\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'g+--', (2): 'b+--'}\n for i, j in x_symmetry_ind:\n pylab.plot(self.time, forces[j], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$'], loc=0)\n pylab.title('Static forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def plot_animate(self, A):\n \"\"\"\"\n try to create animate function to plot mechanisms\n \"\"\"\n fig = plt.figure()\n fig.set_tight_layout(False)\n ax = plt.axes(projection='3d')\n global cnt\n cnt = ax\n global cur_A\n global cur_B\n cur_A = A[0]\n cur_B = self.B[0]\n\n def steps(count=1):\n for i in range(count):\n df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n cnt.plot(x, y, z)\n\n def animate(frame):\n steps(1)\n return cnt\n anim = animation.FuncAnimation(fig, animate, frames=100)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Hexapod:\n\n def __init__(self, axis):\n \"\"\"\n Инициализация начальных параметров системы\n :param axis: ось вращения системы (x, y, z)\n \"\"\"\n self.axis = axis\n self.alpha = 30.0\n self.beta = 30.0\n self.L = 1.5\n self.h_c = 2.0\n self.r = 1.0\n self.m_p = 1000.0\n self.m = 4000.0\n self.nu = 0.5\n self.J = np.array([[5000, 0, 0], [0, 5000, 0], [0, 0, 3500]], np.\n float32)\n self.A_0 = np.round([[self.r * np.sin(2 * np.pi / 3 * i + np.pi), \n self.r * np.cos(2 * np.pi / 3 * i + np.pi), -self.h_c] for i in\n range(-1, 2)], 5)\n self.B = np.array([])\n self.A = np.array([])\n self.all_full_lengths = np.array([])\n self.r = np.array([])\n self.fi_x_0 = 4.0\n self.fi_x = lambda t: self.fi_x_0 * np.sin(2 * np.pi * self.nu * t)\n self.prime_fi_x = lambda t: self.fi_x_0 * 2 * np.pi * self.nu * np.cos(\n 2 * np.pi * self.nu * t)\n self.prime2_fi_x = lambda t: -self.fi_x_0 * (2 * np.pi * self.nu\n ) ** 2 * np.sin(2 * np.pi * self.nu * t)\n self.fi_y_0 = 4.0\n self.fi_y = lambda t: self.fi_y_0 * np.sin(2 * np.pi * self.nu * t)\n self.prime_fi_y = lambda t: self.fi_y_0 * 2 * np.pi * self.nu * np.cos(\n 2 * np.pi * self.nu * t)\n self.prime2_fi_y = lambda t: -self.fi_y_0 * (2 * np.pi * self.nu\n ) ** 2 * np.sin(2 * np.pi * self.nu * t)\n self.R_matrix_x = lambda t: np.round([[np.cos(self.fi_x(t) * np.pi /\n 180.0), -np.sin(self.fi_x(t) * np.pi / 180.0), 0], [np.sin(self\n .fi_x(t) * np.pi / 180.0), np.cos(self.fi_x(t) * np.pi / 180.0),\n 0], [0, 0, 1]], 5)\n self.R_matrix_y = lambda t: np.round([[1, 0, 0], [0, np.cos(self.\n fi_y(t) * np.pi / 180.0), -np.sin(self.fi_y(t) * np.pi / 180.0)\n ], [0, np.sin(self.fi_y(t) * np.pi / 180.0), np.cos(self.fi_y(t\n ) * np.pi / 180.0)]], 5)\n self.R_matrix_z = lambda t: np.round([[np.cos(self.fi_y(t) * np.pi /\n 180.0), 0, np.sin(self.fi_y(t) * np.pi / 180.0)], [0, 1, 0], [-\n np.sin(self.fi_y(t) * np.pi / 180.0), 0, np.cos(self.fi_y(t) *\n np.pi / 180.0)]], 5)\n self.H = np.cos(np.pi / 180.0 * 
self.beta) * np.cos(np.pi / 180.0 *\n self.alpha / 2) * self.L\n self.h = self.L * np.cos(np.pi / 180.0 * self.alpha / 2) * np.sin(\n np.pi / 180.0 * self.beta)\n self.a = self.L * np.sin(np.pi / 180.0 * self.alpha / 2)\n self.r = (self.h ** 2 + self.a ** 2) ** 0.5\n self.end_time = 2.0\n self.start_time = 0.0\n self.steps = 100\n self.time = np.linspace(self.start_time, self.end_time, self.steps)\n self.indexes = [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]]\n\n def set_B(self):\n \"\"\"\n Расчет геометрии стенда - положение точек B_i.\n\n Задается: self.B\n :return: None\n \"\"\"\n for i, A in enumerate(self.A_0):\n a = A[:2]\n b1 = np.array([self.h, self.a])\n b2 = np.array([self.h, -self.a])\n kappa = np.array([[np.cos(np.pi / 180 * (30 - 120 * i)), -np.\n sin(np.pi / 180 * (30 - 120 * i))], [np.sin(np.pi / 180 * (\n 30 - 120 * i)), np.cos(np.pi / 180 * (30 - 120 * i))]])\n p1 = np.dot(kappa, b1) + a\n p2 = np.dot(kappa, b2) + a\n p1 = np.append(p1, -self.H - self.h_c)\n p2 = np.append(p2, -self.H - self.h_c)\n self.B = np.hstack((self.B, p1))\n self.B = np.hstack((self.B, p2))\n self.B = self.B.reshape(6, 3)\n i = 0\n for A in self.A_0:\n assert np.linalg.norm(np.subtract(A, self.B[i])) - self.L <= 0.0001\n assert np.linalg.norm(np.subtract(A, self.B[i + 1])\n ) - self.L <= 0.0001\n i += 2\n\n def get_delta_L(self):\n \"\"\"\n Расчет геометрии положения точек A_i в каждый момент времени.\n Отрисовка графиков изменения длин, скорости и ускорения для каждого привода по времени.\n\n Задается: self.A, self.all_full_lengths, self.set_r\n :return: None\n \"\"\"\n print('####################################################')\n print('[INFO] solve delta L, Velocity, Acceleration ...')\n print('####################################################')\n R_matrix = None\n if self.axis == 'x':\n R_matrix = self.R_matrix_x\n elif self.axis == 'y':\n R_matrix = self.R_matrix_y\n elif self.axis == 'z':\n R_matrix = self.R_matrix_z\n dL_all = []\n L_all = []\n coordinates_A 
= []\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n print('[INFO] Поршень №{}'.format(j + 1))\n dl = []\n l = []\n coord = []\n for t in self.time:\n try:\n A = np.dot(R_matrix(t), self.A_0[i])\n except Exception:\n print('Type error axis')\n L = np.linalg.norm(self.B[j] - A)\n print(self.B[j] - A)\n print(self.L, L)\n print('dL[мм] = {:.5f}'.format((L - self.L) * 1000.0))\n l.append(L)\n dl.append(round((L - self.L) * 1000.0, 5))\n coord.append(A)\n dL_all.append(dl)\n L_all.append(l)\n coordinates_A.append(coord)\n v = [0.0]\n for k in range(self.steps - 1):\n v.append((dl[k + 1] - dl[k]) / (self.time[k + 1] - self.\n time[k]))\n pylab.figure(1)\n pylab.plot(self.time[5:], v[5:], colors[j])\n print('[INFO] v_max =', np.max(np.abs(v[5:])))\n a = [0.0]\n for k in range(self.steps - 1):\n a.append((v[k + 1] - v[k]) / (self.time[k + 1] - self.time[k]))\n pylab.figure(2)\n pylab.plot(self.time[5:], a[5:], colors[j])\n print('[INFO] a_max =', np.max(np.abs(a[5:])))\n print('****************************************************')\n pylab.figure(1)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Velocity')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Velocity [mm/s]')\n pylab.grid()\n pylab.figure(2)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Acceleration')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Acceleration [mm/s^2]')\n pylab.grid()\n pylab.figure(3)\n for i in range(6):\n pylab.plot(self.time, dL_all[i], colors[i])\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Delta length')\n pylab.xlabel('Time [s]')\n pylab.ylabel('dL [mm]')\n pylab.grid()\n plt.show()\n self.A = np.array(coordinates_A[0::2])\n self.all_full_lengths = np.array(L_all)\n self.set_r()\n self.plot_3d_lines()\n\n def plot_3d_lines(self):\n \"\"\"\n Покадровая отрисовка 
геометрии стенда в 3D.\n :return: None\n \"\"\"\n pylab.figure(figsize=(12, 10))\n ax = pylab.axes(projection='3d')\n colors = {(0): 'r', (1): 'orange', (2): 'g', (3): 'olive', (4): 'b',\n (5): 'navy'}\n markers = {(0): '^', (1): '^', (2): 'o', (3): 'o', (4): '*', (5): '*'}\n for i, j in self.indexes:\n df_A = pd.Series(data=self.A_0[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n ax.scatter(x, y, z, c=colors[j], marker=markers[j], s=20.0)\n ax.legend(['1', '2', '3', '4', '5', '6'], loc=0)\n for i, j in self.indexes:\n k = 0\n for a, r in zip(self.A[i], self.r[j]):\n df_A = pd.Series(data=a, index=['x', 'y', 'z'])\n df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])\n df_r = pd.Series(data=r, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n x1 = [df_r.x, 0]\n y1 = [df_r.y, 0]\n z1 = [df_r.z, 0]\n x2 = [df_r.x, df_B.x]\n y2 = [df_r.y, df_B.y]\n z2 = [df_r.z, df_B.z]\n if k % int(self.steps - 1) == 0:\n ax.plot(x, y, z, c=colors[j], marker=markers[j])\n k += 1\n for i in range(0, self.steps, 9):\n a = np.array([self.A[0, i], self.A[1, i], self.A[2, i]])\n df_A = pd.DataFrame(data=a, columns=['x', 'y', 'z'])\n df_A = pd.concat((df_A, df_A.take([0])), axis=0)\n ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='gray')\n df_B = pd.DataFrame(data=self.B, columns=['x', 'y', 'z'])\n df_B = pd.concat((df_B, df_B.take([0])))\n df_A = pd.DataFrame(data=self.A_0, columns=['x', 'y', 'z'])\n df_A = pd.concat((df_A, df_A.take([0])), axis=0)\n ax.plot(df_B.x.values, df_B.y.values, df_B.z.values, c='black',\n linewidth=4.0)\n ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='black',\n linewidth=4.0)\n ax.view_init(30, -39)\n plt.show()\n\n def calculate_angles(self, l1, l2):\n \"\"\"\n Решение теоремы косинусов для поиска угла по трем сторонам\n :param l1: прилежащая сторона к вычисляемому углу\n :param l2: 
противолежащая сторона к углу\n :return: (alpha, teta, gamma) - углы в треугольнике сил\n \"\"\"\n cos_teta = (l1 ** 2 + (2 * self.a) ** 2 - l2 ** 2\n ) / 2 * l1 * 2 * self.a\n teta = np.arccos(cos_teta) * 180.0 / np.pi\n b = l1 ** 2 + self.a ** 2 - 2 * l1 * self.a * cos_teta\n cos_alpha = (l1 ** 2 + b ** 2 - l2 ** 2) / 2 * l1 * self.a\n alpha = np.arccos(cos_alpha) * 180.0 / np.pi\n gamma = 180.0 - teta - alpha\n return alpha, teta, gamma\n\n def set_r(self):\n \"\"\"\n Вычисление радиус-векторов плеч сил для каждого цилиндра\n :return: None\n \"\"\"\n r_all = []\n for i, j in self.indexes:\n r = []\n for a in self.A[i]:\n L = np.array(a - self.B[j])\n direct_L = L / np.linalg.norm(L)\n t1 = np.array([0, -direct_L[2], direct_L[1]])\n b1 = np.array([a[2] * direct_L[1] - a[1] * direct_L[2]])\n t2 = direct_L\n b2 = np.array([0])\n t3 = np.array([a[1] * direct_L[2] - a[2] * direct_L[1], -a[\n 0] * direct_L[2] + a[2] * direct_L[0], a[0] * direct_L[\n 1] - a[1] * direct_L[0]])\n b3 = np.array([0])\n T = np.stack((t1, t2, t3))\n b = np.stack((b1, b2, b3))\n r.append(np.linalg.solve(T, b).reshape((3,)))\n r_all.append(r)\n self.r = np.array(r_all)\n\n def solve_dynamic_forces(self):\n \"\"\"\n решение обратной задачи стенда\n Первое приближение - решение двумерной зазадчи для пооврота оси вокруг оси х\n :return: минимальная и максимальная нагрузка на каждый цилиндр\n \"\"\"\n print('####################################################')\n print('[INFO] solve DYNAMIC forces ...')\n print('####################################################')\n A = []\n for i in range(self.steps):\n a = []\n for j in range(3):\n a_ = self.A[j, i, :]\n a.append(a_)\n A.append(a)\n R = []\n for i in range(self.steps):\n r = []\n for j in range(6):\n r_ = self.r[j, i, :]\n r.append(r_)\n R.append(r)\n A = np.array(A)\n R = np.array(R)\n forces = []\n for a, r, t in zip(A, R, self.time):\n L = []\n direct = []\n shoulder = []\n for i, j in self.indexes:\n len = np.array(self.B[j] - a[i])\n dir 
= len / np.linalg.norm(len)\n L.append(len)\n direct_force_try = self.B[j] - r[j]\n direct.append(direct_force_try)\n shoulder.append(np.cross(r[j], direct_force_try))\n L = np.array(L)\n T_static = np.array(direct).T\n T_dynamics = np.array(shoulder).T\n b_static = np.array([-self.m * 9.8, 0, 0]).reshape((3, 1))\n dynamic_comp = None\n if self.axis == 'x':\n comp = self.J[2, 2] * self.prime2_fi_x(t)\n dynamic_comp = np.array([comp, 0, 0]).reshape((3, 1))\n elif self.axis == 'y':\n comp = self.J[1, 1] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, comp, 0]).reshape((3, 1))\n elif self.axis == 'z':\n comp = self.J[0, 0] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, 0, comp]).reshape((3, 1))\n b_dynamic = dynamic_comp\n T = T_dynamics[:, :3]\n b = b_dynamic[:, :3]\n dynamic_f = np.linalg.solve(T, b).reshape((3,))\n forces.append(dynamic_f)\n print('[INFO] time:', t)\n print('[INFO] length:', [round(np.linalg.norm(l), 4) for l in L])\n print('[INFO] shoulders:', [round(np.linalg.norm(l), 4) for l in\n np.array(shoulder)])\n print('[INFO] forces:', [round(f, 4) for f in dynamic_f])\n print('[INFO] dynamic component:', b_dynamic.T)\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n pylab.plot(self.time, forces[i], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$', '$F_4$', '$F_5$', '$F_6$'],\n loc=0)\n pylab.title('Dynamic forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def solve_static_forces(self):\n \"\"\"\n Решение обратной задачи стедна для статических нагрузок\n :return: компоненты силы для каждой опоры\n \"\"\"\n print('####################################################')\n print('[INFO] solve STATIC forces ...')\n print('####################################################')\n x_symmetry_ind = [[0, 0], [0, 1], [1, 2]]\n forces = []\n 
for a, t in zip(self.A.reshape((self.steps, 3, 3)), self.time):\n L1 = np.array(a[0] - self.B[0])\n L2 = np.array(a[0] - self.B[1])\n L3 = np.array(a[1] - self.B[2])\n direct_L1 = L1 / np.linalg.norm(L1)\n direct_L2 = L2 / np.linalg.norm(L2)\n direct_L3 = L3 / np.linalg.norm(L3)\n T = np.stack((direct_L1, direct_L2, direct_L3))\n b = np.array([-self.m * 9.8 / 2, 0, 0]).reshape((3, 1))\n static_f = np.linalg.solve(T, b).reshape((3,)) / 2\n forces.append(static_f)\n print('[INFO] time:', t)\n print('[INFO] length:', round(np.linalg.norm(L1), 4), round(np.\n linalg.norm(L2), 4), round(np.linalg.norm(L3), 4))\n print('[INFO] forces:', round(static_f[0] / 2, 4), round(\n static_f[1] / 2, 4), round(static_f[2] / 2, 4))\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'g+--', (2): 'b+--'}\n for i, j in x_symmetry_ind:\n pylab.plot(self.time, forces[j], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$'], loc=0)\n pylab.title('Static forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def plot_animate(self, A):\n \"\"\"\"\n try to create animate function to plot mechanisms\n \"\"\"\n fig = plt.figure()\n fig.set_tight_layout(False)\n ax = plt.axes(projection='3d')\n global cnt\n cnt = ax\n global cur_A\n global cur_B\n cur_A = A[0]\n cur_B = self.B[0]\n\n def steps(count=1):\n for i in range(count):\n df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n cnt.plot(x, y, z)\n\n def animate(frame):\n steps(1)\n return cnt\n anim = animation.FuncAnimation(fig, animate, frames=100)\n plt.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Hexapod:\n\n def __init__(self, axis):\n \"\"\"\n Инициализация начальных параметров системы\n :param axis: ось вращения системы (x, y, z)\n \"\"\"\n self.axis = axis\n self.alpha = 30.0\n self.beta = 30.0\n self.L = 1.5\n self.h_c = 2.0\n self.r = 1.0\n self.m_p = 1000.0\n self.m = 4000.0\n self.nu = 0.5\n self.J = np.array([[5000, 0, 0], [0, 5000, 0], [0, 0, 3500]], np.\n float32)\n self.A_0 = np.round([[self.r * np.sin(2 * np.pi / 3 * i + np.pi), \n self.r * np.cos(2 * np.pi / 3 * i + np.pi), -self.h_c] for i in\n range(-1, 2)], 5)\n self.B = np.array([])\n self.A = np.array([])\n self.all_full_lengths = np.array([])\n self.r = np.array([])\n self.fi_x_0 = 4.0\n self.fi_x = lambda t: self.fi_x_0 * np.sin(2 * np.pi * self.nu * t)\n self.prime_fi_x = lambda t: self.fi_x_0 * 2 * np.pi * self.nu * np.cos(\n 2 * np.pi * self.nu * t)\n self.prime2_fi_x = lambda t: -self.fi_x_0 * (2 * np.pi * self.nu\n ) ** 2 * np.sin(2 * np.pi * self.nu * t)\n self.fi_y_0 = 4.0\n self.fi_y = lambda t: self.fi_y_0 * np.sin(2 * np.pi * self.nu * t)\n self.prime_fi_y = lambda t: self.fi_y_0 * 2 * np.pi * self.nu * np.cos(\n 2 * np.pi * self.nu * t)\n self.prime2_fi_y = lambda t: -self.fi_y_0 * (2 * np.pi * self.nu\n ) ** 2 * np.sin(2 * np.pi * self.nu * t)\n self.R_matrix_x = lambda t: np.round([[np.cos(self.fi_x(t) * np.pi /\n 180.0), -np.sin(self.fi_x(t) * np.pi / 180.0), 0], [np.sin(self\n .fi_x(t) * np.pi / 180.0), np.cos(self.fi_x(t) * np.pi / 180.0),\n 0], [0, 0, 1]], 5)\n self.R_matrix_y = lambda t: np.round([[1, 0, 0], [0, np.cos(self.\n fi_y(t) * np.pi / 180.0), -np.sin(self.fi_y(t) * np.pi / 180.0)\n ], [0, np.sin(self.fi_y(t) * np.pi / 180.0), np.cos(self.fi_y(t\n ) * np.pi / 180.0)]], 5)\n self.R_matrix_z = lambda t: np.round([[np.cos(self.fi_y(t) * np.pi /\n 180.0), 0, np.sin(self.fi_y(t) * np.pi / 180.0)], [0, 1, 0], [-\n np.sin(self.fi_y(t) * np.pi / 180.0), 0, np.cos(self.fi_y(t) *\n np.pi / 180.0)]], 5)\n self.H = np.cos(np.pi / 180.0 * 
self.beta) * np.cos(np.pi / 180.0 *\n self.alpha / 2) * self.L\n self.h = self.L * np.cos(np.pi / 180.0 * self.alpha / 2) * np.sin(\n np.pi / 180.0 * self.beta)\n self.a = self.L * np.sin(np.pi / 180.0 * self.alpha / 2)\n self.r = (self.h ** 2 + self.a ** 2) ** 0.5\n self.end_time = 2.0\n self.start_time = 0.0\n self.steps = 100\n self.time = np.linspace(self.start_time, self.end_time, self.steps)\n self.indexes = [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]]\n\n def set_B(self):\n \"\"\"\n Расчет геометрии стенда - положение точек B_i.\n\n Задается: self.B\n :return: None\n \"\"\"\n for i, A in enumerate(self.A_0):\n a = A[:2]\n b1 = np.array([self.h, self.a])\n b2 = np.array([self.h, -self.a])\n kappa = np.array([[np.cos(np.pi / 180 * (30 - 120 * i)), -np.\n sin(np.pi / 180 * (30 - 120 * i))], [np.sin(np.pi / 180 * (\n 30 - 120 * i)), np.cos(np.pi / 180 * (30 - 120 * i))]])\n p1 = np.dot(kappa, b1) + a\n p2 = np.dot(kappa, b2) + a\n p1 = np.append(p1, -self.H - self.h_c)\n p2 = np.append(p2, -self.H - self.h_c)\n self.B = np.hstack((self.B, p1))\n self.B = np.hstack((self.B, p2))\n self.B = self.B.reshape(6, 3)\n i = 0\n for A in self.A_0:\n assert np.linalg.norm(np.subtract(A, self.B[i])) - self.L <= 0.0001\n assert np.linalg.norm(np.subtract(A, self.B[i + 1])\n ) - self.L <= 0.0001\n i += 2\n\n def get_delta_L(self):\n \"\"\"\n Расчет геометрии положения точек A_i в каждый момент времени.\n Отрисовка графиков изменения длин, скорости и ускорения для каждого привода по времени.\n\n Задается: self.A, self.all_full_lengths, self.set_r\n :return: None\n \"\"\"\n print('####################################################')\n print('[INFO] solve delta L, Velocity, Acceleration ...')\n print('####################################################')\n R_matrix = None\n if self.axis == 'x':\n R_matrix = self.R_matrix_x\n elif self.axis == 'y':\n R_matrix = self.R_matrix_y\n elif self.axis == 'z':\n R_matrix = self.R_matrix_z\n dL_all = []\n L_all = []\n coordinates_A 
= []\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n print('[INFO] Поршень №{}'.format(j + 1))\n dl = []\n l = []\n coord = []\n for t in self.time:\n try:\n A = np.dot(R_matrix(t), self.A_0[i])\n except Exception:\n print('Type error axis')\n L = np.linalg.norm(self.B[j] - A)\n print(self.B[j] - A)\n print(self.L, L)\n print('dL[мм] = {:.5f}'.format((L - self.L) * 1000.0))\n l.append(L)\n dl.append(round((L - self.L) * 1000.0, 5))\n coord.append(A)\n dL_all.append(dl)\n L_all.append(l)\n coordinates_A.append(coord)\n v = [0.0]\n for k in range(self.steps - 1):\n v.append((dl[k + 1] - dl[k]) / (self.time[k + 1] - self.\n time[k]))\n pylab.figure(1)\n pylab.plot(self.time[5:], v[5:], colors[j])\n print('[INFO] v_max =', np.max(np.abs(v[5:])))\n a = [0.0]\n for k in range(self.steps - 1):\n a.append((v[k + 1] - v[k]) / (self.time[k + 1] - self.time[k]))\n pylab.figure(2)\n pylab.plot(self.time[5:], a[5:], colors[j])\n print('[INFO] a_max =', np.max(np.abs(a[5:])))\n print('****************************************************')\n pylab.figure(1)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Velocity')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Velocity [mm/s]')\n pylab.grid()\n pylab.figure(2)\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Acceleration')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Acceleration [mm/s^2]')\n pylab.grid()\n pylab.figure(3)\n for i in range(6):\n pylab.plot(self.time, dL_all[i], colors[i])\n pylab.legend(['1 line', '2 line', '3 line', '4 line', '5 line',\n '6 line'], loc=0)\n pylab.title('Delta length')\n pylab.xlabel('Time [s]')\n pylab.ylabel('dL [mm]')\n pylab.grid()\n plt.show()\n self.A = np.array(coordinates_A[0::2])\n self.all_full_lengths = np.array(L_all)\n self.set_r()\n self.plot_3d_lines()\n\n def plot_3d_lines(self):\n \"\"\"\n Покадровая отрисовка 
геометрии стенда в 3D.\n :return: None\n \"\"\"\n pylab.figure(figsize=(12, 10))\n ax = pylab.axes(projection='3d')\n colors = {(0): 'r', (1): 'orange', (2): 'g', (3): 'olive', (4): 'b',\n (5): 'navy'}\n markers = {(0): '^', (1): '^', (2): 'o', (3): 'o', (4): '*', (5): '*'}\n for i, j in self.indexes:\n df_A = pd.Series(data=self.A_0[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n ax.scatter(x, y, z, c=colors[j], marker=markers[j], s=20.0)\n ax.legend(['1', '2', '3', '4', '5', '6'], loc=0)\n for i, j in self.indexes:\n k = 0\n for a, r in zip(self.A[i], self.r[j]):\n df_A = pd.Series(data=a, index=['x', 'y', 'z'])\n df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])\n df_r = pd.Series(data=r, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n x1 = [df_r.x, 0]\n y1 = [df_r.y, 0]\n z1 = [df_r.z, 0]\n x2 = [df_r.x, df_B.x]\n y2 = [df_r.y, df_B.y]\n z2 = [df_r.z, df_B.z]\n if k % int(self.steps - 1) == 0:\n ax.plot(x, y, z, c=colors[j], marker=markers[j])\n k += 1\n for i in range(0, self.steps, 9):\n a = np.array([self.A[0, i], self.A[1, i], self.A[2, i]])\n df_A = pd.DataFrame(data=a, columns=['x', 'y', 'z'])\n df_A = pd.concat((df_A, df_A.take([0])), axis=0)\n ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='gray')\n df_B = pd.DataFrame(data=self.B, columns=['x', 'y', 'z'])\n df_B = pd.concat((df_B, df_B.take([0])))\n df_A = pd.DataFrame(data=self.A_0, columns=['x', 'y', 'z'])\n df_A = pd.concat((df_A, df_A.take([0])), axis=0)\n ax.plot(df_B.x.values, df_B.y.values, df_B.z.values, c='black',\n linewidth=4.0)\n ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='black',\n linewidth=4.0)\n ax.view_init(30, -39)\n plt.show()\n\n def calculate_angles(self, l1, l2):\n \"\"\"\n Решение теоремы косинусов для поиска угла по трем сторонам\n :param l1: прилежащая сторона к вычисляемому углу\n :param l2: 
противолежащая сторона к углу\n :return: (alpha, teta, gamma) - углы в треугольнике сил\n \"\"\"\n cos_teta = (l1 ** 2 + (2 * self.a) ** 2 - l2 ** 2\n ) / 2 * l1 * 2 * self.a\n teta = np.arccos(cos_teta) * 180.0 / np.pi\n b = l1 ** 2 + self.a ** 2 - 2 * l1 * self.a * cos_teta\n cos_alpha = (l1 ** 2 + b ** 2 - l2 ** 2) / 2 * l1 * self.a\n alpha = np.arccos(cos_alpha) * 180.0 / np.pi\n gamma = 180.0 - teta - alpha\n return alpha, teta, gamma\n\n def set_r(self):\n \"\"\"\n Вычисление радиус-векторов плеч сил для каждого цилиндра\n :return: None\n \"\"\"\n r_all = []\n for i, j in self.indexes:\n r = []\n for a in self.A[i]:\n L = np.array(a - self.B[j])\n direct_L = L / np.linalg.norm(L)\n t1 = np.array([0, -direct_L[2], direct_L[1]])\n b1 = np.array([a[2] * direct_L[1] - a[1] * direct_L[2]])\n t2 = direct_L\n b2 = np.array([0])\n t3 = np.array([a[1] * direct_L[2] - a[2] * direct_L[1], -a[\n 0] * direct_L[2] + a[2] * direct_L[0], a[0] * direct_L[\n 1] - a[1] * direct_L[0]])\n b3 = np.array([0])\n T = np.stack((t1, t2, t3))\n b = np.stack((b1, b2, b3))\n r.append(np.linalg.solve(T, b).reshape((3,)))\n r_all.append(r)\n self.r = np.array(r_all)\n\n def solve_dynamic_forces(self):\n \"\"\"\n решение обратной задачи стенда\n Первое приближение - решение двумерной зазадчи для пооврота оси вокруг оси х\n :return: минимальная и максимальная нагрузка на каждый цилиндр\n \"\"\"\n print('####################################################')\n print('[INFO] solve DYNAMIC forces ...')\n print('####################################################')\n A = []\n for i in range(self.steps):\n a = []\n for j in range(3):\n a_ = self.A[j, i, :]\n a.append(a_)\n A.append(a)\n R = []\n for i in range(self.steps):\n r = []\n for j in range(6):\n r_ = self.r[j, i, :]\n r.append(r_)\n R.append(r)\n A = np.array(A)\n R = np.array(R)\n forces = []\n for a, r, t in zip(A, R, self.time):\n L = []\n direct = []\n shoulder = []\n for i, j in self.indexes:\n len = np.array(self.B[j] - a[i])\n dir 
= len / np.linalg.norm(len)\n L.append(len)\n direct_force_try = self.B[j] - r[j]\n direct.append(direct_force_try)\n shoulder.append(np.cross(r[j], direct_force_try))\n L = np.array(L)\n T_static = np.array(direct).T\n T_dynamics = np.array(shoulder).T\n b_static = np.array([-self.m * 9.8, 0, 0]).reshape((3, 1))\n dynamic_comp = None\n if self.axis == 'x':\n comp = self.J[2, 2] * self.prime2_fi_x(t)\n dynamic_comp = np.array([comp, 0, 0]).reshape((3, 1))\n elif self.axis == 'y':\n comp = self.J[1, 1] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, comp, 0]).reshape((3, 1))\n elif self.axis == 'z':\n comp = self.J[0, 0] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, 0, comp]).reshape((3, 1))\n b_dynamic = dynamic_comp\n T = T_dynamics[:, :3]\n b = b_dynamic[:, :3]\n dynamic_f = np.linalg.solve(T, b).reshape((3,))\n forces.append(dynamic_f)\n print('[INFO] time:', t)\n print('[INFO] length:', [round(np.linalg.norm(l), 4) for l in L])\n print('[INFO] shoulders:', [round(np.linalg.norm(l), 4) for l in\n np.array(shoulder)])\n print('[INFO] forces:', [round(f, 4) for f in dynamic_f])\n print('[INFO] dynamic component:', b_dynamic.T)\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'rx-', (2): 'g+--', (3): 'gx-', (4):\n 'b+--', (5): 'bx-'}\n for i, j in self.indexes:\n pylab.plot(self.time, forces[i], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$', '$F_4$', '$F_5$', '$F_6$'],\n loc=0)\n pylab.title('Dynamic forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def solve_static_forces(self):\n \"\"\"\n Решение обратной задачи стедна для статических нагрузок\n :return: компоненты силы для каждой опоры\n \"\"\"\n print('####################################################')\n print('[INFO] solve STATIC forces ...')\n print('####################################################')\n x_symmetry_ind = [[0, 0], [0, 1], [1, 2]]\n forces = []\n 
for a, t in zip(self.A.reshape((self.steps, 3, 3)), self.time):\n L1 = np.array(a[0] - self.B[0])\n L2 = np.array(a[0] - self.B[1])\n L3 = np.array(a[1] - self.B[2])\n direct_L1 = L1 / np.linalg.norm(L1)\n direct_L2 = L2 / np.linalg.norm(L2)\n direct_L3 = L3 / np.linalg.norm(L3)\n T = np.stack((direct_L1, direct_L2, direct_L3))\n b = np.array([-self.m * 9.8 / 2, 0, 0]).reshape((3, 1))\n static_f = np.linalg.solve(T, b).reshape((3,)) / 2\n forces.append(static_f)\n print('[INFO] time:', t)\n print('[INFO] length:', round(np.linalg.norm(L1), 4), round(np.\n linalg.norm(L2), 4), round(np.linalg.norm(L3), 4))\n print('[INFO] forces:', round(static_f[0] / 2, 4), round(\n static_f[1] / 2, 4), round(static_f[2] / 2, 4))\n print('****************************************************')\n forces = np.array(forces).T\n colors = {(0): 'r+--', (1): 'g+--', (2): 'b+--'}\n for i, j in x_symmetry_ind:\n pylab.plot(self.time, forces[j], colors[j])\n pylab.legend(['$F_1$', '$F_2$', '$F_3$'], loc=0)\n pylab.title('Static forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n plt.show()\n\n def plot_animate(self, A):\n \"\"\"\"\n try to create animate function to plot mechanisms\n \"\"\"\n fig = plt.figure()\n fig.set_tight_layout(False)\n ax = plt.axes(projection='3d')\n global cnt\n cnt = ax\n global cur_A\n global cur_B\n cur_A = A[0]\n cur_B = self.B[0]\n\n def steps(count=1):\n for i in range(count):\n df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n cnt.plot(x, y, z)\n\n def animate(frame):\n steps(1)\n return cnt\n anim = animation.FuncAnimation(fig, animate, frames=100)\n plt.show()\n\n\nif __name__ == '__main__':\n hex = Hexapod(axis='y')\n hex.set_B()\n hex.get_delta_L()\n hex.solve_static_forces()\n hex.solve_dynamic_forces()\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib.animation as animation\nimport pylab\nfrom mpl_toolkits import mplot3d\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nclass Hexapod:\n def __init__(self, axis):\n \"\"\"\n Инициализация начальных параметров системы\n :param axis: ось вращения системы (x, y, z)\n \"\"\"\n self.axis = axis # ось вращения тела\n self.alpha = 30. # угол между приводами в точке крпления\n self.beta = 30. # двугранный угол между приводами и платформой\n self.L = 1.5 # длина привода\n self.h_c = 2. # высота центра масс тела\n self.r = 1. # радиус тела\n self.m_p = 1000. # масса платформы\n self.m = 4000. # масса тела\n self.nu = 0.5 # частота\n\n # тензор инерции тела для решения обратной задачи\n self.J = np.array([[5000, 0, 0],\n [0, 5000, 0],\n [0, 0, 3500]], np.float32)\n\n # начальное положение точек крепления приводов на ВЕРХНЕЙ платформе\n self.A_0 = np.round([[self.r*np.sin(2*np.pi/3*i + np.pi),\n self.r*np.cos(2*np.pi/3*i + np.pi),\n -self.h_c] for i in range(-1, 2)], 5)\n\n # точки крепления приводов на НИЖНЕЙ платформе (const)\n self.B = np.array([])\n # положение точек крепления приводов на ВЕРХНЕЙ платформе за все время\n self.A = np.array([])\n # длина каждого привода за все время\n self.all_full_lengths = np.array([])\n # плечи сил приводов за все время\n self.r = np.array([])\n\n # ампилитуда вращения, закон изменения угла и его производные по OX\n self.fi_x_0 = 4. # градусы\n self.fi_x = lambda t: self.fi_x_0 * np.sin(2*np.pi*self.nu*t)\n self.prime_fi_x = lambda t: self.fi_x_0 * 2*np.pi*self.nu * np.cos(2*np.pi*self.nu*t)\n self.prime2_fi_x = lambda t: -self.fi_x_0 * (2*np.pi*self.nu)**2 * np.sin(2*np.pi*self.nu*t)\n\n # ампилитуда вращения, закон изменения угла и его производные по OY и OZ\n self.fi_y_0 = 4. 
# градусы\n self.fi_y = lambda t: self.fi_y_0 * np.sin(2*np.pi*self.nu*t)\n self.prime_fi_y = lambda t: self.fi_y_0 * 2*np.pi*self.nu * np.cos(2*np.pi*self.nu*t)\n self.prime2_fi_y = lambda t: -self.fi_y_0 * (2*np.pi*self.nu)**2 * np.sin(2*np.pi*self.nu*t)\n\n # матрица поворота вокруг оси OX\n self.R_matrix_x = lambda t: np.round([[np.cos(self.fi_x(t)*np.pi/180.), -np.sin(self.fi_x(t)*np.pi/180.), 0],\n [np.sin(self.fi_x(t)*np.pi/180.), np.cos(self.fi_x(t)*np.pi/180.), 0],\n [0, 0, 1]], 5)\n\n # матрица поворота вокруг оси OY\n self.R_matrix_y = lambda t: np.round([[1, 0, 0],\n [0, np.cos(self.fi_y(t)*np.pi/180.), -np.sin(self.fi_y(t)*np.pi/180.)],\n [0, np.sin(self.fi_y(t)*np.pi/180.), np.cos(self.fi_y(t)*np.pi/180.)]], 5)\n\n # матрица поворота вокруг оси OZ\n self.R_matrix_z = lambda t: np.round([[np.cos(self.fi_y(t)*np.pi/180.), 0, np.sin(self.fi_y(t)*np.pi/180.)],\n [0, 1, 0],\n [-np.sin(self.fi_y(t)*np.pi/180.), 0, np.cos(self.fi_y(t)*np.pi/180.)]], 5)\n\n # для построения геометрии точек B\n self.H = np.cos(np.pi/180. * self.beta) * np.cos(np.pi/180. 
* self.alpha/2) * self.L\n self.h = self.L * np.cos(np.pi/180.*self.alpha/2) * np.sin(np.pi/180.*self.beta)\n self.a = self.L * np.sin(np.pi/180.*self.alpha/2) # основание треугольника\n self.r = (self.h**2 + self.a**2)**0.5\n\n # отсчет времени для расчета законов\n self.end_time = 2.0\n self.start_time = 0.\n self.steps = 100\n self.time = np.linspace(self.start_time, self.end_time, self.steps)\n\n # связь индексов нижней и верхней платформы\n self.indexes = [[0, 0], [0, 1], [1, 2], [1, 3], [2, 4], [2, 5]]\n\n def set_B(self):\n \"\"\"\n Расчет геометрии стенда - положение точек B_i.\n\n Задается: self.B\n :return: None\n \"\"\"\n for i, A in enumerate(self.A_0):\n a = A[:2]\n b1 = np.array([self.h, self.a])\n b2 = np.array([self.h, - self.a])\n\n kappa = np.array([[np.cos(np.pi / 180 * (30-120*i)), -np.sin(np.pi / 180 * (30-120*i))],\n [np.sin(np.pi / 180 * (30-120*i)), np.cos(np.pi / 180 * (30-120*i))]])\n\n p1 = np.dot(kappa, b1) + a\n p2 = np.dot(kappa, b2) + a\n p1 = np.append(p1, - self.H - self.h_c)\n p2 = np.append(p2, - self.H - self.h_c)\n self.B = np.hstack((self.B, p1))\n self.B = np.hstack((self.B, p2))\n\n self.B = self.B.reshape(6, 3)\n\n # проверка длин приводов\n i = 0\n for A in self.A_0:\n assert np.linalg.norm(np.subtract(A, self.B[i])) - self.L <= 1e-4\n assert np.linalg.norm(np.subtract(A, self.B[i + 1])) - self.L <= 1e-4\n # print(np.linalg.norm(np.subtract(A, self.B[i])))\n # print(np.linalg.norm(np.subtract(A, self.B[i + 1])))\n i += 2\n\n def get_delta_L(self):\n \"\"\"\n Расчет геометрии положения точек A_i в каждый момент времени.\n Отрисовка графиков изменения длин, скорости и ускорения для каждого привода по времени.\n\n Задается: self.A, self.all_full_lengths, self.set_r\n :return: None\n \"\"\"\n print('####################################################')\n print('[INFO] solve delta L, Velocity, Acceleration ...')\n print('####################################################')\n # матрица поворота вокруг зазадной оси\n R_matrix = 
None\n if self.axis == 'x':\n R_matrix = self.R_matrix_x\n elif self.axis == 'y':\n R_matrix = self.R_matrix_y\n elif self.axis == 'z':\n R_matrix = self.R_matrix_z\n\n # удлинения каждого цилиндра за заданное время\n dL_all = []\n # длины всех цилиндров за все время\n L_all = []\n # координаты точек крепления на ВЕРХНЕЙ платформе\n coordinates_A = []\n # легенда для графиков\n colors = {0: 'r+--', 1: 'rx-',\n 2: 'g+--', 3: 'gx-',\n 4: 'b+--', 5: 'bx-'}\n\n for i, j in self.indexes:\n print('[INFO] Поршень №{}'.format(j+1))\n dl = [] # изменение длины поршня в момент времени t\n l = [] # длины поршня в момент времени t\n coord = [] # координата точки A_i в момент времени t\n for t in self.time:\n try:\n A = np.dot(R_matrix(t), self.A_0[i])\n except Exception:\n print('Type error axis')\n\n # текущая длина привода\n L = np.linalg.norm(self.B[j] - A)\n print(self.B[j] - A)\n print(self.L, L)\n # L = np.sum((A - self.B[j])**2)**0.5\n print('dL[мм] = {:.5f}'.format((L - self.L) * 1e3))\n l.append(L)\n dl.append(round(((L - self.L) * 1e3), 5))\n coord.append(A)\n\n dL_all.append(dl)\n L_all.append(l)\n coordinates_A.append(coord)\n\n # численно находим СКОРОСТЬ изменения длины приводов\n v = [0.0]\n for k in range(self.steps - 1):\n v.append((dl[k+1] - dl[k]) / (self.time[k+1] - self.time[k]))\n pylab.figure(1)\n pylab.plot(self.time[5:], v[5:], colors[j])\n print('[INFO] v_max =', np.max(np.abs(v[5:])))\n\n # численно находим УСКОРЕНИЕ изменения длины приводов\n a = [0.0]\n for k in range(self.steps - 1):\n a.append((v[k + 1] - v[k]) / (self.time[k + 1] - self.time[k]))\n pylab.figure(2)\n pylab.plot(self.time[5:], a[5:], colors[j])\n print('[INFO] a_max =', np.max(np.abs(a[5:])))\n print('****************************************************')\n\n # легенда для графика со скоростями\n pylab.figure(1)\n pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)\n pylab.title('Velocity')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Velocity 
[mm/s]')\n pylab.grid()\n # plt.savefig(\"output/velocity_{}.png\".format(self.axis))\n\n # легенда для графика с ускорениями\n pylab.figure(2)\n pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)\n pylab.title('Acceleration')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Acceleration [mm/s^2]')\n pylab.grid()\n # pylab.savefig(\"output/acceleration_{}.png\".format(self.axis))\n\n # график удлинения каждого поршня\n pylab.figure(3)\n for i in range(6):\n pylab.plot(self.time, dL_all[i], colors[i])\n pylab.legend([r'1 line', '2 line', '3 line', '4 line', '5 line', '6 line'], loc=0)\n pylab.title('Delta length')\n pylab.xlabel('Time [s]')\n pylab.ylabel('dL [mm]')\n pylab.grid()\n # pylab.savefig(\"output/length_{}.png\".format(self.axis))\n plt.show()\n\n # исключим повторение вершин\n self.A = np.array(coordinates_A[0::2])\n self.all_full_lengths = np.array(L_all)\n self.set_r()\n # покадровая отрисовка геометрии стенда\n self.plot_3d_lines()\n\n # self.plot_animate(coordinates_A)\n\n def plot_3d_lines(self):\n \"\"\"\n Покадровая отрисовка геометрии стенда в 3D.\n :return: None\n \"\"\"\n pylab.figure(figsize=(12, 10))\n ax = pylab.axes(projection='3d')\n\n colors = {0: 'r', 1: 'orange',\n 2: 'g', 3: 'olive',\n 4: 'b', 5: 'navy'}\n markers = {0: '^', 1: '^',\n 2: 'o', 3: 'o',\n 4: '*', 5: '*'}\n\n # задать легенду\n for i, j in self.indexes:\n df_A = pd.Series(data=self.A_0[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])\n\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n ax.scatter(x, y, z, c=colors[j], marker=markers[j], s=20.)\n ax.legend([r'1', '2', '3', '4', '5', '6'], loc=0)\n\n # indexes = [[0, 0], [1, 2], [2, 4]]\n # построить смещения каждого поршня\n for i, j in self.indexes:\n k = 0\n for (a, r) in zip(self.A[i], self.r[j]):\n df_A = pd.Series(data=a, index=['x', 'y', 'z'])\n df_B = pd.Series(data=self.B[j], index=['x', 'y', 'z'])\n df_r = pd.Series(data=r, 
index=['x', 'y', 'z'])\n\n # геометрия длины цилиндров\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n\n # геометрия плеч сил\n x1 = [df_r.x, 0]\n y1 = [df_r.y, 0]\n z1 = [df_r.z, 0]\n\n # продолжение оси цилиндров\n x2 = [df_r.x, df_B.x]\n y2 = [df_r.y, df_B.y]\n z2 = [df_r.z, df_B.z]\n\n # частичная раскадровка\n if k % int(self.steps-1) == 0:\n # if k:\n # ax.plot(x1, y1, z1, c=colors[j], marker=markers[j])\n # ax.plot(x2, y2, z2, c='gray', marker='+')\n ax.plot(x, y, z, c=colors[j], marker=markers[j])\n # print('H_A =', z[0])\n k += 1\n\n # посторить смещение верхней плтаформы\n for i in range(0, self.steps, 9):\n a = np.array([self.A[0, i], self.A[1, i], self.A[2, i]])\n df_A = pd.DataFrame(data=a, columns=['x', 'y', 'z'])\n df_A = pd.concat((df_A, df_A.take([0])), axis=0)\n\n ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='gray')\n\n # отрисовать начальные положения верхней и нижней платформы\n df_B = pd.DataFrame(data=self.B, columns=['x', 'y', 'z'])\n df_B = pd.concat((df_B, df_B.take([0])))\n df_A = pd.DataFrame(data=self.A_0, columns=['x', 'y', 'z'])\n df_A = pd.concat((df_A, df_A.take([0])), axis=0)\n\n ax.plot(df_B.x.values, df_B.y.values, df_B.z.values, c='black', linewidth=4.)\n ax.plot(df_A.x.values, df_A.y.values, df_A.z.values, c='black', linewidth=4.)\n\n ax.view_init(30, -39)\n # pylab.savefig(\"output/plot_3d_{}.png\".format(self.axis))\n plt.show()\n\n def calculate_angles(self, l1, l2):\n \"\"\"\n Решение теоремы косинусов для поиска угла по трем сторонам\n :param l1: прилежащая сторона к вычисляемому углу\n :param l2: противолежащая сторона к углу\n :return: (alpha, teta, gamma) - углы в треугольнике сил\n \"\"\"\n cos_teta = (l1**2 + (2*self.a)**2 - l2**2) / 2*l1*2*self.a\n teta = np.arccos(cos_teta) * 180. / np.pi\n b = l1**2 + self.a**2 - 2*l1*self.a*cos_teta\n cos_alpha = (l1**2 + b**2 - l2**2) / 2*l1*self.a\n alpha = np.arccos(cos_alpha) * 180. / np.pi\n gamma = 180. 
- teta - alpha\n return alpha, teta, gamma\n\n def set_r(self):\n \"\"\"\n Вычисление радиус-векторов плеч сил для каждого цилиндра\n :return: None\n \"\"\"\n r_all = []\n for i, j in self.indexes:\n r = []\n for a in self.A[i]:\n L = np.array(a - self.B[j])\n direct_L = L / np.linalg.norm(L)\n t1 = np.array([0, -direct_L[2], direct_L[1]])\n b1 = np.array([a[2]*direct_L[1] - a[1]*direct_L[2]])\n t2 = direct_L\n b2 = np.array([0])\n t3 = np.array([a[1]*direct_L[2] - a[2]*direct_L[1],\n -a[0]*direct_L[2] + a[2]*direct_L[0],\n a[0]*direct_L[1] - a[1]*direct_L[0]])\n b3 = np.array([0])\n T = np.stack((t1, t2, t3))\n b = np.stack((b1, b2, b3))\n\n r.append(np.linalg.solve(T, b).reshape((3,)))\n r_all.append(r)\n\n self.r = np.array(r_all)\n\n def solve_dynamic_forces(self):\n \"\"\"\n решение обратной задачи стенда\n Первое приближение - решение двумерной зазадчи для пооврота оси вокруг оси х\n :return: минимальная и максимальная нагрузка на каждый цилиндр\n \"\"\"\n print('####################################################')\n print('[INFO] solve DYNAMIC forces ...')\n print('####################################################')\n A = []\n for i in range(self.steps):\n a = []\n for j in range(3):\n a_ = self.A[j, i, :]\n a.append(a_)\n A.append(a)\n\n R = []\n for i in range(self.steps):\n r = []\n for j in range(6):\n r_ = self.r[j, i, :]\n r.append(r_)\n R.append(r)\n\n A = np.array(A)\n R = np.array(R)\n forces = []\n for a, r, t in zip(A, R, self.time):\n L = []\n direct = [] # направления сил\n shoulder = [] # плечи сил\n for i, j in self.indexes:\n len = np.array(self.B[j] - a[i])\n dir = len / np.linalg.norm(len)\n L.append(len)\n direct_force_try = self.B[j] - r[j]\n # direct.append(dir)\n direct.append(direct_force_try)\n # direct.append(direct_force_try)\n shoulder.append(np.cross(r[j], direct_force_try))\n L = np.array(L)\n\n T_static = np.array(direct).T\n T_dynamics = np.array(shoulder).T\n\n b_static = np.array([-self.m*9.8, 0, 0]).reshape((3, 1))\n\n 
# определение направления действующих сил\n dynamic_comp = None\n if self.axis == 'x':\n comp = self.J[2, 2] * self.prime2_fi_x(t)\n dynamic_comp = np.array([comp, 0, 0]).reshape((3, 1))\n elif self.axis == 'y':\n comp = self.J[1, 1] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, comp, 0]).reshape((3, 1))\n elif self.axis == 'z':\n comp = self.J[0, 0] * self.prime2_fi_y(t)\n dynamic_comp = np.array([0, 0, comp]).reshape((3, 1))\n b_dynamic = dynamic_comp\n\n # T = np.vstack((T_static, T_dynamics))\n # b = np.vstack((b_static, b_dynamic))\n\n T = T_dynamics[:, :3]\n b = b_dynamic[:, :3]\n # print(T)\n # print(b)\n\n dynamic_f = np.linalg.solve(T, b).reshape((3,))\n forces.append(dynamic_f)\n\n print('[INFO] time:', t)\n print('[INFO] length:', [round(np.linalg.norm(l), 4) for l in L])\n print('[INFO] shoulders:', [round(np.linalg.norm(l), 4) for l in np.array(shoulder)])\n print('[INFO] forces:', [round(f, 4) for f in dynamic_f])\n print('[INFO] dynamic component:', b_dynamic.T)\n print('****************************************************')\n forces = np.array(forces).T\n\n # график приложенной силы к цилиндрам от времени\n colors = {0: 'r+--', 1: 'rx-',\n 2: 'g+--', 3: 'gx-',\n 4: 'b+--', 5: 'bx-'}\n for i, j in self.indexes:\n pylab.plot(self.time, forces[i], colors[j])\n # colors = {0: 'r+--', 1: 'g+--', 2: 'b+--'}\n # for i in range(3):\n # pylab.plot(self.time, forces[i], colors[i], label='$F_{}$'.format(i))\n pylab.legend([r'$F_1$', '$F_2$', '$F_3$', '$F_4$', '$F_5$', '$F_6$'], loc=0)\n # plt.legend(loc=\"lower right\")\n pylab.title('Dynamic forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n\n plt.show()\n\n def solve_static_forces(self):\n \"\"\"\n Решение обратной задачи стедна для статических нагрузок\n :return: компоненты силы для каждой опоры\n \"\"\"\n print('####################################################')\n print('[INFO] solve STATIC forces ...')\n 
print('####################################################')\n x_symmetry_ind = [[0, 0], [0, 1], [1, 2]]\n forces = []\n for a, t in zip(self.A.reshape((self.steps, 3, 3)), self.time):\n L1 = np.array(a[0] - self.B[0])\n L2 = np.array(a[0] - self.B[1])\n L3 = np.array(a[1] - self.B[2])\n\n direct_L1 = L1 / np.linalg.norm(L1)\n direct_L2 = L2 / np.linalg.norm(L2)\n direct_L3 = L3 / np.linalg.norm(L3)\n\n T = np.stack((direct_L1, direct_L2, direct_L3))\n b = np.array([-self.m*9.8/2, 0, 0]).reshape((3, 1))\n\n static_f = np.linalg.solve(T, b).reshape((3,)) / 2\n forces.append(static_f)\n print('[INFO] time:', t)\n print('[INFO] length:',\n round(np.linalg.norm(L1), 4),\n round(np.linalg.norm(L2), 4),\n round(np.linalg.norm(L3), 4))\n print('[INFO] forces:',\n round(static_f[0]/2, 4),\n round(static_f[1]/2, 4),\n round(static_f[2]/2, 4))\n print('****************************************************')\n forces = np.array(forces).T\n\n # график приложенной силы к цилиндрам от времени\n colors = {0: 'r+--', 1: 'g+--', 2: 'b+--'}\n for i, j in x_symmetry_ind:\n pylab.plot(self.time, forces[j], colors[j])\n pylab.legend([r'$F_1$', '$F_2$', '$F_3$'], loc=0)\n pylab.title('Static forces')\n pylab.xlabel('Time [s]')\n pylab.ylabel('Force [kg*m/s^2]')\n pylab.grid()\n\n plt.show()\n\n def plot_animate(self, A):\n \"\"\"\"\n try to create animate function to plot mechanisms\n \"\"\"\n fig = plt.figure()\n fig.set_tight_layout(False)\n ax = plt.axes(projection='3d')\n global cnt\n cnt = ax\n\n global cur_A\n global cur_B\n cur_A = A[0]\n cur_B = self.B[0]\n\n def steps(count=1):\n for i in range(count):\n df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n cnt.plot(x, y, z)\n\n def animate(frame):\n steps(1)\n return cnt\n anim = animation.FuncAnimation(fig, animate, frames=100)\n plt.show()\n\n\nif __name__ == \"__main__\":\n hex = 
Hexapod(axis='y')\n\n hex.set_B()\n hex.get_delta_L()\n hex.solve_static_forces()\n hex.solve_dynamic_forces()",
"step-ids": [
6,
9,
10,
11,
13
]
}
|
[
6,
9,
10,
11,
13
] |
import random
import profile_handler
import re
class RollBot():
    """Parses dice notation (e.g. ``2d20+5``), rolls dice, and keeps roll state.

    All results are stored on the instance between calls: ``roll_input``
    validates/parses, ``roll_dice`` generates rolls, ``calculate_roll`` sums
    them, and ``handle_adv`` / ``roll_stats`` build on those primitives.
    Callers should check ``self.error`` (non-empty string) after each call
    to detect a failed or partially-failed input.

    NOTE(review): ``roll_input`` relies on the module-level ``dice_handler``
    regex being defined before it is first *called* (it is defined after this
    class in the same module).
    """
    def __init__(self) -> None:
        """Initializes the per-roll state attributes of the class."""
        # this is where the processed user input gets stored for easy readbacks
        self.input_last_roll = ''
        # an empty list to store the results of the roll in (rolls are kept
        # as strings, not ints — see roll_dice)
        self.last_roll = []
        # The sum of all the rolls inside the last_roll list, plus modifier
        self.result = 0
        # empty string means "no error"; otherwise a user-facing message
        self.error = ''
        self.number_of_dice = ''
        self.size_of_dice = ''
        # '+' or '-' sign of the modifier
        self.modifier = ''
        # the numeric part of the modifier, kept as a string
        self.modifier_number = ''
        self.sort = False
        self.adv = False
        self.hidden = False
        # a flag to save the number of the dropped roll on an adv roll
        self.dropped_roll = ''
        # flags for rolling stats for a char (keyed 0-5, one per stat)
        self.d_stats = {}
        self.dropped_d_stats = {}
        self.result_d_stats = {}
        # flags for the art/meme/hidden rolls dictionaries
        self.art_dict = {}
        self.meme_dict = {}
        self.hidden_rolls = {}

    def roll_input(self, user_input: str, optional_input: str) -> None:
        """Parse *user_input* dice notation, validate it, and perform the roll.

        ``user_input`` is expected in ``(N)d/DSize(+/-Mod)`` form and is matched
        against the module-level ``dice_handler`` regex.  ``optional_input``
        selects a mode: ``'adv'``, ``'dadv'``, ``'sort'``, ``'hide'``, or ``''``
        for a plain roll.  Results land in ``self.last_roll`` /
        ``self.input_last_roll``; failures set ``self.error``.

        NOTE(review): some validation branches set ``self.error`` without
        returning (0 dice, missing size) — later checks and the roll itself
        still run in those cases.  This ordering is load-bearing; do not
        reorder the checks.
        """
        # Resets the status of everything before a new roll
        self.last_roll = []
        self.result = 0
        self.hidden = False
        # An empty error flag to easily throw errors back through discord
        self.error = ''
        self.number_of_dice = ''
        self.size_of_dice = ''
        # The modifier is either a + or a -, stored for easy access
        self.modifier = ''
        self.modifier_number = ''
        self.dropped_roll = ''
        # this code is run in try to catch attribute errors due to a wrong
        # input (dice_handler.match returning None makes .group raise
        # AttributeError) and ValueError from the int() conversions
        try:
            # All parts filtered by regex get stored in an object,
            # that then gets split
            split_input = dice_handler.match(user_input)
            # sets the number of dice in the class for use in other functions
            self.number_of_dice = split_input.group(1)
            # sets the size of the dice in the class for use in other functions
            self.size_of_dice = split_input.group(3)
            # sets the +/- in the class for use in other functions
            self.modifier = split_input.group(5)
            # sets the number of the mod in the class to use in other functions
            self.modifier_number = split_input.group(6)
            # An if statement that allows typing 1 for rolling 1 dice to be
            # optional.
            if self.number_of_dice == '':
                self.number_of_dice = 1
            # Makes sure at least 1 dice is rolled (error only; does not
            # return — a 0-dice roll simply produces an empty result list)
            if self.number_of_dice == '0':
                self.error = "You can't roll 0 dice"
            # Sets a cap of 200 dice being rolled
            if int(self.number_of_dice) > 200:
                self.error = \
                    'No! Thats to many dice I do not have that many!!!'
                return
            # Meant to catch errors where a none size dice managed to sneak
            # Through.  (Sets the error but falls through; int(None) below
            # would raise TypeError — TODO confirm this path is reachable.)
            if self.size_of_dice == '0' or self.size_of_dice is None:
                self.error = "Please define the dice size."
            # Sets a cap on how large of a dice you can roll.
            if int(self.size_of_dice) > 50000:
                self.error = "Dice too big!" + \
                    " That has gotta be fake nothing goes this high"
                return
            # Checks whether no modifier was entered or if it was incorrectly
            # entered by checking the length of the input vs what came through.
            if self.modifier is None and \
                    len(str(user_input)) > \
                    len(str(self.number_of_dice) +
                        str(self.size_of_dice) + 'D'):
                self.error = " Incorrect modifier. Please use + or -"
                return
            # Sets modifier to +0 if no +/- is entered.
            if self.modifier == '' or self.modifier is None:
                self.modifier = '+'
                self.modifier_number = '0'
            # Sets modifier to +0 if no number for it was entered
            if self.modifier_number == '' or self.modifier_number is None:
                self.modifier = '+'
                self.modifier_number = '0'
            # The full input of the user in 1 flag to print back to the user
            # at the end.
            self.input_last_roll = \
                ' `Rolled ' + \
                str(self.number_of_dice) + \
                'd' + \
                str(self.size_of_dice) + \
                str(self.modifier) + \
                str(self.modifier_number) + \
                ':` '
            # Checks if user asked for advantage on a roll and hands it off
            if optional_input.lower() == 'adv':
                self.adv = 'adv'
                self.handle_adv()
            # Checks if user asked for disadvantage on a roll and hands it off
            elif optional_input.lower() == 'dadv':
                self.adv = 'dadv'
                self.handle_adv()
            # Checks if user asked for a sorted roll
            elif optional_input.lower() == 'sort':
                # Rolls the dice like normal but sorts the flag after.
                self.sort = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
            elif optional_input.lower() == 'hide':
                # Rolls the dice like normal but does not show the result in channel.
                self.hidden = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
            elif optional_input.lower() != '':
                self.error = str(optional_input) + \
                    " is not a valid option. Please try (sort/adv/dadv/hide)"
            else:
                # If everything passed the checks hand offs the processed input
                # to the randomizing and calculating functions.
                self.roll_dice(self.number_of_dice, self.size_of_dice)
        # Catches an attribute error on a wrong input and notifies the user.
        except AttributeError:
            self.error = \
                " Invalid input please follow this format (1)d20(+/-(5))"
        # Catches int('') / randint with a 0-size dice.
        except ValueError:
            self.error = \
                " Invalid input, please Make sure dice size is bigger than 0"

    def roll_dice(self, number_of_dice, size_of_dice) -> None:
        """Roll *number_of_dice* dice of *size_of_dice* sides into last_roll.

        Both arguments may arrive as str or int (they are passed through
        ``int()``).  Rolls are stored as *strings* in ``self.last_roll``;
        when ``self.sort`` is True the rolls are sorted ascending first and
        the flag is reset to False.
        """
        # makes a list of random numbers based on the information
        # that was put in
        dice = []
        for roll in range(int(number_of_dice)):
            roll = random.randint(1, int(size_of_dice))
            dice.append(roll)
        # Checks whether the result needs to be sorted or not
        if self.sort is True:
            dice.sort()
            # Turns ints into strings after sorting
            converted_dice = []
            for i in range(len(dice)):
                roll_to_convert = dice[i]
                roll_to_convert = str(roll_to_convert)
                converted_dice.append(roll_to_convert)
            # Sets the last roll flag and returns the sort flag to false
            self.last_roll = converted_dice
            self.sort = False
        # Sets the last roll flag for easy cross function use.
        else:
            # Turns ints into strings in case it had to be sorted
            converted_dice = []
            for i in range(len(dice)):
                roll_to_convert = dice[i]
                roll_to_convert = str(roll_to_convert)
                converted_dice.append(roll_to_convert)
            self.last_roll = converted_dice

    def calculate_roll(self) -> None:
        """Accumulate the sum of last_roll plus the signed modifier into result.

        NOTE(review): adds onto the existing ``self.result`` rather than
        resetting it — ``roll_input`` zeroes it per roll, but ``roll_stats``
        depends on this accumulation behavior per iteration; do not reset here.
        """
        # Takes all the numbers from the last roll and adds them up.
        for i in self.last_roll:
            self.result = int(self.result) + int(i)
        # modifier is '+' or '-', so '+5' / '-5' parses as a signed int
        self.result = self.result + int(self.modifier + self.modifier_number)

    def handle_adv(self) -> None:
        """Handle the optional (dis)advantage modes set in ``self.adv``.

        Rolls 2 dice sorted ascending, then drops the low one ('adv') or the
        high one ('dadv'), recording the dropped value in
        ``self.dropped_roll`` and resetting ``self.adv`` to False.
        """
        # This part handles advantage so it takes the highest of the 2 numbers
        # and then drops the lowest number
        if self.adv == 'adv':
            # Checks whether the number that was input is not 1 or 2.
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) != '1':
                self.error = 'Can only roll advantage with 2 dice, ya dummy!'
            # Checks if number of dice was left blank so automatically set to 1
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) == '1':
                self.number_of_dice = 2
            # Checks if the number of dice is 2 before moving on
            if str(self.number_of_dice) == '2':
                self.sort = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
                # Stores the dropped (lowest) roll before deleting it
                self.dropped_roll = self.last_roll[0]
                del self.last_roll[0]
            # Returns flag to default state
            self.adv = False
        # This part handles disadvantage so it takes the lowest of the 2
        # numbers and then drops the highest number
        if self.adv == 'dadv':
            # Checks whether the number that was input is not 1 or 2.
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) != '1':
                self.error = \
                    'Can only roll disadvantage with 2 dice, ya dummy!'
            # Checks if number of dice was left blank so automatically set to 1
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) == '1':
                self.number_of_dice = 2
            # Checks if the number of dice is 2 before moving on
            if str(self.number_of_dice) == '2':
                self.sort = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
                # Stores the dropped (highest) roll before deleting it
                self.dropped_roll = self.last_roll[1]
                del self.last_roll[1]
            # Returns flag to default state
            self.adv = False

    def roll_stats(self) -> None:
        """Roll six D&D-style ability scores: 4d6 each, dropping the lowest.

        Fills ``d_stats`` (kept rolls), ``dropped_d_stats`` (dropped roll),
        and ``result_d_stats`` (totals), each keyed 0-5.
        """
        for stat in range(6):
            # sorted roll so last_roll[0] is the lowest die
            self.roll_input('4d6', 'sort')
            self.dropped_d_stats[stat] = self.last_roll[0]
            del self.last_roll[0]
            self.calculate_roll()
            self.result_d_stats[stat] = self.result
            self.d_stats[stat] = self.last_roll
# The regex that looks through the input for key information:
# group 1 = dice count, group 3 = dice size, group 5 = '+'/'-' sign,
# group 6 = modifier value.  Defined after the class but used only at
# call time by RollBot.roll_input, so this ordering is safe.
dice_handler = re.compile(r'(\d*)([dD])(\d*)(([+-])(\d*))?')
# Module-level profile handler instance shared by the bot.
ph = profile_handler.PHandler()
|
normal
|
{
"blob_id": "301a6ec56bd265ff63a924ecd64d6708cb6b139c",
"index": 8419,
"step-1": "<mask token>\n\n\nclass RollBot:\n <mask token>\n\n def __init__(self):\n \"\"\"initializes the attributes of the class\"\"\"\n self.input_last_roll = ''\n self.last_roll = []\n self.result = 0\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.sort = False\n self.adv = False\n self.hidden = False\n self.dropped_roll = ''\n self.d_stats = {}\n self.dropped_d_stats = {}\n self.result_d_stats = {}\n self.art_dict = {}\n self.meme_dict = {}\n self.hidden_rolls = {}\n <mask token>\n\n def roll_dice(self, number_of_dice, size_of_dice):\n \"\"\"Simple function that rolls dice\"\"\"\n dice = []\n for roll in range(int(number_of_dice)):\n roll = random.randint(1, int(size_of_dice))\n dice.append(roll)\n if self.sort is True:\n dice.sort()\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n self.sort = False\n else:\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n\n def calculate_roll(self):\n \"\"\"Function to calculate the sum of the roll\"\"\"\n for i in self.last_roll:\n self.result = int(self.result) + int(i)\n self.result = self.result + int(self.modifier + self.modifier_number)\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RollBot:\n <mask token>\n\n def __init__(self):\n \"\"\"initializes the attributes of the class\"\"\"\n self.input_last_roll = ''\n self.last_roll = []\n self.result = 0\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.sort = False\n self.adv = False\n self.hidden = False\n self.dropped_roll = ''\n self.d_stats = {}\n self.dropped_d_stats = {}\n self.result_d_stats = {}\n self.art_dict = {}\n self.meme_dict = {}\n self.hidden_rolls = {}\n\n def roll_input(self, user_input, optional_input):\n \"\"\"complicated as fuck function that handles input without breaking\"\"\"\n self.last_roll = []\n self.result = 0\n self.hidden = False\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.dropped_roll = ''\n try:\n split_input = dice_handler.match(user_input)\n self.number_of_dice = split_input.group(1)\n self.size_of_dice = split_input.group(3)\n self.modifier = split_input.group(5)\n self.modifier_number = split_input.group(6)\n if self.number_of_dice == '':\n self.number_of_dice = 1\n if self.number_of_dice == '0':\n self.error = \"You can't roll 0 dice\"\n if int(self.number_of_dice) > 200:\n self.error = (\n 'No! Thats to many dice I do not have that many!!!')\n return\n if self.size_of_dice == '0' or self.size_of_dice is None:\n self.error = 'Please define the dice size.'\n if int(self.size_of_dice) > 50000:\n self.error = ('Dice too big!' +\n ' That has gotta be fake nothing goes this high')\n return\n if self.modifier is None and len(str(user_input)) > len(str(\n self.number_of_dice) + str(self.size_of_dice) + 'D'):\n self.error = ' Incorrect modifier. 
Please use + or -'\n return\n if self.modifier == '' or self.modifier is None:\n self.modifier = '+'\n self.modifier_number = '0'\n if self.modifier_number == '' or self.modifier_number is None:\n self.modifier = '+'\n self.modifier_number = '0'\n self.input_last_roll = ' `Rolled ' + str(self.number_of_dice\n ) + 'd' + str(self.size_of_dice) + str(self.modifier) + str(\n self.modifier_number) + ':` '\n if optional_input.lower() == 'adv':\n self.adv = 'adv'\n self.handle_adv()\n elif optional_input.lower() == 'dadv':\n self.adv = 'dadv'\n self.handle_adv()\n elif optional_input.lower() == 'sort':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n elif optional_input.lower() == 'hide':\n self.hidden = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n elif optional_input.lower() != '':\n self.error = str(optional_input\n ) + ' is not a valid option. Please try (sort/adv/dadv/hide)'\n else:\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n except AttributeError:\n self.error = (\n ' Invalid input please follow this format (1)d20(+/-(5))')\n except ValueError:\n self.error = (\n ' Invalid input, please Make sure dice size is bigger than 0')\n\n def roll_dice(self, number_of_dice, size_of_dice):\n \"\"\"Simple function that rolls dice\"\"\"\n dice = []\n for roll in range(int(number_of_dice)):\n roll = random.randint(1, int(size_of_dice))\n dice.append(roll)\n if self.sort is True:\n dice.sort()\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n self.sort = False\n else:\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n\n def calculate_roll(self):\n \"\"\"Function to calculate the sum of the roll\"\"\"\n for i in self.last_roll:\n 
self.result = int(self.result) + int(i)\n self.result = self.result + int(self.modifier + self.modifier_number)\n\n def handle_adv(self):\n \"\"\"Function that handles the optional advantage options\"\"\"\n if self.adv == 'adv':\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) != '1':\n self.error = 'Can only roll advantage with 2 dice, ya dummy!'\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) == '1':\n self.number_of_dice = 2\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n self.dropped_roll = self.last_roll[0]\n del self.last_roll[0]\n self.adv = False\n if self.adv == 'dadv':\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) != '1':\n self.error = (\n 'Can only roll disadvantage with 2 dice, ya dummy!')\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) == '1':\n self.number_of_dice = 2\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n self.dropped_roll = self.last_roll[1]\n del self.last_roll[1]\n self.adv = False\n\n def roll_stats(self):\n \"\"\"Lets you roll new stats for a char\"\"\"\n for stat in range(6):\n self.roll_input('4d6', 'sort')\n self.dropped_d_stats[stat] = self.last_roll[0]\n del self.last_roll[0]\n self.calculate_roll()\n self.result_d_stats[stat] = self.result\n self.d_stats[stat] = self.last_roll\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RollBot:\n \"\"\"A class that handles the bulk of functionality\"\"\"\n\n def __init__(self):\n \"\"\"initializes the attributes of the class\"\"\"\n self.input_last_roll = ''\n self.last_roll = []\n self.result = 0\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.sort = False\n self.adv = False\n self.hidden = False\n self.dropped_roll = ''\n self.d_stats = {}\n self.dropped_d_stats = {}\n self.result_d_stats = {}\n self.art_dict = {}\n self.meme_dict = {}\n self.hidden_rolls = {}\n\n def roll_input(self, user_input, optional_input):\n \"\"\"complicated as fuck function that handles input without breaking\"\"\"\n self.last_roll = []\n self.result = 0\n self.hidden = False\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.dropped_roll = ''\n try:\n split_input = dice_handler.match(user_input)\n self.number_of_dice = split_input.group(1)\n self.size_of_dice = split_input.group(3)\n self.modifier = split_input.group(5)\n self.modifier_number = split_input.group(6)\n if self.number_of_dice == '':\n self.number_of_dice = 1\n if self.number_of_dice == '0':\n self.error = \"You can't roll 0 dice\"\n if int(self.number_of_dice) > 200:\n self.error = (\n 'No! Thats to many dice I do not have that many!!!')\n return\n if self.size_of_dice == '0' or self.size_of_dice is None:\n self.error = 'Please define the dice size.'\n if int(self.size_of_dice) > 50000:\n self.error = ('Dice too big!' +\n ' That has gotta be fake nothing goes this high')\n return\n if self.modifier is None and len(str(user_input)) > len(str(\n self.number_of_dice) + str(self.size_of_dice) + 'D'):\n self.error = ' Incorrect modifier. 
Please use + or -'\n return\n if self.modifier == '' or self.modifier is None:\n self.modifier = '+'\n self.modifier_number = '0'\n if self.modifier_number == '' or self.modifier_number is None:\n self.modifier = '+'\n self.modifier_number = '0'\n self.input_last_roll = ' `Rolled ' + str(self.number_of_dice\n ) + 'd' + str(self.size_of_dice) + str(self.modifier) + str(\n self.modifier_number) + ':` '\n if optional_input.lower() == 'adv':\n self.adv = 'adv'\n self.handle_adv()\n elif optional_input.lower() == 'dadv':\n self.adv = 'dadv'\n self.handle_adv()\n elif optional_input.lower() == 'sort':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n elif optional_input.lower() == 'hide':\n self.hidden = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n elif optional_input.lower() != '':\n self.error = str(optional_input\n ) + ' is not a valid option. Please try (sort/adv/dadv/hide)'\n else:\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n except AttributeError:\n self.error = (\n ' Invalid input please follow this format (1)d20(+/-(5))')\n except ValueError:\n self.error = (\n ' Invalid input, please Make sure dice size is bigger than 0')\n\n def roll_dice(self, number_of_dice, size_of_dice):\n \"\"\"Simple function that rolls dice\"\"\"\n dice = []\n for roll in range(int(number_of_dice)):\n roll = random.randint(1, int(size_of_dice))\n dice.append(roll)\n if self.sort is True:\n dice.sort()\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n self.sort = False\n else:\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n\n def calculate_roll(self):\n \"\"\"Function to calculate the sum of the roll\"\"\"\n for i in self.last_roll:\n 
self.result = int(self.result) + int(i)\n self.result = self.result + int(self.modifier + self.modifier_number)\n\n def handle_adv(self):\n \"\"\"Function that handles the optional advantage options\"\"\"\n if self.adv == 'adv':\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) != '1':\n self.error = 'Can only roll advantage with 2 dice, ya dummy!'\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) == '1':\n self.number_of_dice = 2\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n self.dropped_roll = self.last_roll[0]\n del self.last_roll[0]\n self.adv = False\n if self.adv == 'dadv':\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) != '1':\n self.error = (\n 'Can only roll disadvantage with 2 dice, ya dummy!')\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) == '1':\n self.number_of_dice = 2\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n self.dropped_roll = self.last_roll[1]\n del self.last_roll[1]\n self.adv = False\n\n def roll_stats(self):\n \"\"\"Lets you roll new stats for a char\"\"\"\n for stat in range(6):\n self.roll_input('4d6', 'sort')\n self.dropped_d_stats[stat] = self.last_roll[0]\n del self.last_roll[0]\n self.calculate_roll()\n self.result_d_stats[stat] = self.result\n self.d_stats[stat] = self.last_roll\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass RollBot:\n \"\"\"A class that handles the bulk of functionality\"\"\"\n\n def __init__(self):\n \"\"\"initializes the attributes of the class\"\"\"\n self.input_last_roll = ''\n self.last_roll = []\n self.result = 0\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.sort = False\n self.adv = False\n self.hidden = False\n self.dropped_roll = ''\n self.d_stats = {}\n self.dropped_d_stats = {}\n self.result_d_stats = {}\n self.art_dict = {}\n self.meme_dict = {}\n self.hidden_rolls = {}\n\n def roll_input(self, user_input, optional_input):\n \"\"\"complicated as fuck function that handles input without breaking\"\"\"\n self.last_roll = []\n self.result = 0\n self.hidden = False\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.dropped_roll = ''\n try:\n split_input = dice_handler.match(user_input)\n self.number_of_dice = split_input.group(1)\n self.size_of_dice = split_input.group(3)\n self.modifier = split_input.group(5)\n self.modifier_number = split_input.group(6)\n if self.number_of_dice == '':\n self.number_of_dice = 1\n if self.number_of_dice == '0':\n self.error = \"You can't roll 0 dice\"\n if int(self.number_of_dice) > 200:\n self.error = (\n 'No! Thats to many dice I do not have that many!!!')\n return\n if self.size_of_dice == '0' or self.size_of_dice is None:\n self.error = 'Please define the dice size.'\n if int(self.size_of_dice) > 50000:\n self.error = ('Dice too big!' +\n ' That has gotta be fake nothing goes this high')\n return\n if self.modifier is None and len(str(user_input)) > len(str(\n self.number_of_dice) + str(self.size_of_dice) + 'D'):\n self.error = ' Incorrect modifier. 
Please use + or -'\n return\n if self.modifier == '' or self.modifier is None:\n self.modifier = '+'\n self.modifier_number = '0'\n if self.modifier_number == '' or self.modifier_number is None:\n self.modifier = '+'\n self.modifier_number = '0'\n self.input_last_roll = ' `Rolled ' + str(self.number_of_dice\n ) + 'd' + str(self.size_of_dice) + str(self.modifier) + str(\n self.modifier_number) + ':` '\n if optional_input.lower() == 'adv':\n self.adv = 'adv'\n self.handle_adv()\n elif optional_input.lower() == 'dadv':\n self.adv = 'dadv'\n self.handle_adv()\n elif optional_input.lower() == 'sort':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n elif optional_input.lower() == 'hide':\n self.hidden = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n elif optional_input.lower() != '':\n self.error = str(optional_input\n ) + ' is not a valid option. Please try (sort/adv/dadv/hide)'\n else:\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n except AttributeError:\n self.error = (\n ' Invalid input please follow this format (1)d20(+/-(5))')\n except ValueError:\n self.error = (\n ' Invalid input, please Make sure dice size is bigger than 0')\n\n def roll_dice(self, number_of_dice, size_of_dice):\n \"\"\"Simple function that rolls dice\"\"\"\n dice = []\n for roll in range(int(number_of_dice)):\n roll = random.randint(1, int(size_of_dice))\n dice.append(roll)\n if self.sort is True:\n dice.sort()\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n self.sort = False\n else:\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n\n def calculate_roll(self):\n \"\"\"Function to calculate the sum of the roll\"\"\"\n for i in self.last_roll:\n 
self.result = int(self.result) + int(i)\n self.result = self.result + int(self.modifier + self.modifier_number)\n\n def handle_adv(self):\n \"\"\"Function that handles the optional advantage options\"\"\"\n if self.adv == 'adv':\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) != '1':\n self.error = 'Can only roll advantage with 2 dice, ya dummy!'\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) == '1':\n self.number_of_dice = 2\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n self.dropped_roll = self.last_roll[0]\n del self.last_roll[0]\n self.adv = False\n if self.adv == 'dadv':\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) != '1':\n self.error = (\n 'Can only roll disadvantage with 2 dice, ya dummy!')\n if str(self.number_of_dice) != '2' and str(self.number_of_dice\n ) == '1':\n self.number_of_dice = 2\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n self.dropped_roll = self.last_roll[1]\n del self.last_roll[1]\n self.adv = False\n\n def roll_stats(self):\n \"\"\"Lets you roll new stats for a char\"\"\"\n for stat in range(6):\n self.roll_input('4d6', 'sort')\n self.dropped_d_stats[stat] = self.last_roll[0]\n del self.last_roll[0]\n self.calculate_roll()\n self.result_d_stats[stat] = self.result\n self.d_stats[stat] = self.last_roll\n\n\ndice_handler = re.compile('(\\\\d*)([dD])(\\\\d*)(([+-])(\\\\d*))?')\nph = profile_handler.PHandler()\n",
"step-5": "import random\nimport profile_handler\nimport re\n\n\nclass RollBot():\n \"\"\"A class that handles the bulk of functionality\"\"\"\n\n def __init__(self):\n \"\"\"initializes the attributes of the class\"\"\"\n # this is where the procesed user input gets stored for easy readbacks\n self.input_last_roll = ''\n # an empty list to store the results of the roll in\n self.last_roll = []\n # The sum of all the roles inside the last_roll list\n self.result = 0\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n self.modifier = ''\n self.modifier_number = ''\n self.sort = False\n self.adv = False\n self.hidden = False\n # a flag to save the number of the dropped roll on an adv roll\n self.dropped_roll = ''\n # flags for rolling stats for a char\n self.d_stats = {}\n self.dropped_d_stats = {}\n self.result_d_stats = {}\n # flag for the art/meme/hidden rolls dictionary\n self.art_dict = {}\n self.meme_dict = {}\n self.hidden_rolls = {}\n\n def roll_input(self, user_input, optional_input):\n \"\"\"complicated as fuck function that handles input without breaking\"\"\"\n\n # Resets the status of everything before a new roll\n self.last_roll = []\n self.result = 0\n self.hidden = False\n # An empty error flag to easily throw errors back through discord\n self.error = ''\n self.number_of_dice = ''\n self.size_of_dice = ''\n # The modifier is either a + or a -, stored for easy acces\n self.modifier = ''\n self.modifier_number = ''\n self.dropped_roll = ''\n\n # this code is run in try to catch atribute errors due to a wrong input\n try:\n # All parts filtered by regex get stored in an object,\n # that then gets split\n split_input = dice_handler.match(user_input)\n # sets the number of dice in the class for use in other functions\n self.number_of_dice = split_input.group(1)\n # sets the size of the dice in the class for use in other functions\n self.size_of_dice = split_input.group(3)\n # sets the +/- in the class for use in other functions\n 
self.modifier = split_input.group(5)\n # sets the number of the mod in the class to use in other functions\n self.modifier_number = split_input.group(6)\n\n # An if statements that alows typing 1 for rolling 1 dice to be\n # optional.\n if self.number_of_dice == '':\n self.number_of_dice = 1\n # Makes sure atleast 1 dice is rolled\n if self.number_of_dice == '0':\n self.error = \"You can't roll 0 dice\"\n # Sets a cap of 200 dice being rolled\n if int(self.number_of_dice) > 200:\n self.error = \\\n 'No! Thats to many dice I do not have that many!!!'\n return\n\n # Meant to catch errors where a none size dice managed to sneak\n # Through.\n if self.size_of_dice == '0' or self.size_of_dice is None:\n self.error = \"Please define the dice size.\"\n # Sets a cap on how large of a dice you can roll.\n if int(self.size_of_dice) > 50000:\n self.error = \"Dice too big!\" + \\\n \" That has gotta be fake nothing goes this high\"\n return\n\n # Checks wether no modifier was entered or if it was incorrectly\n # entered by checking the lenght of the input vs what came through.\n if self.modifier is None and \\\n len(str(user_input)) > \\\n len(str(self.number_of_dice) +\n str(self.size_of_dice) + 'D'):\n self.error = \" Incorrect modifier. 
Please use + or -\"\n return\n\n # Sets modifier to +0 if no +/- is entered.\n if self.modifier == '' or self.modifier is None:\n self.modifier = '+'\n self.modifier_number = '0'\n\n # Sets modifier to +0 if no number for it was entered\n if self.modifier_number == '' or self.modifier_number is None:\n self.modifier = '+'\n self.modifier_number = '0'\n\n # The full input of the user in 1 flag to print back to the user\n # at the end.\n self.input_last_roll = \\\n ' `Rolled ' + \\\n str(self.number_of_dice) + \\\n 'd' + \\\n str(self.size_of_dice) + \\\n str(self.modifier) + \\\n str(self.modifier_number) + \\\n ':` '\n\n if optional_input.lower() == 'adv':\n self.adv = 'adv'\n self.handle_adv()\n\n # Checks if user asked for disadvantage on a roll and hands it off\n elif optional_input.lower() == 'dadv':\n self.adv = 'dadv'\n self.handle_adv()\n\n # Checks if user asked for a sorted roll\n elif optional_input.lower() == 'sort':\n # Rolls the dice like normal but sorts the flag after.\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n\n elif optional_input.lower() == 'hide':\n # Rolls the dice like normal but does not show the result in channel.\n self.hidden = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n\n elif optional_input.lower() != '':\n self.error = str(optional_input) + \\\n \" is not a valid option. 
Please try (sort/adv/dadv/hide)\"\n\n else:\n # If everything passed the checks hand offs the proccesed input\n # to the randomizing and calculating functions.\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n\n # Catches and attribute error on a wrong input and notifies the user.\n except AttributeError:\n self.error = \\\n \" Invalid input please follow this format (1)d20(+/-(5))\"\n except ValueError:\n self.error = \\\n \" Invalid input, please Make sure dice size is bigger than 0\"\n\n def roll_dice(self, number_of_dice, size_of_dice):\n \"\"\"Simple function that rolls dice\"\"\"\n # makes a list of random numbers based on the information\n # that was put in\n dice = []\n for roll in range(int(number_of_dice)):\n roll = random.randint(1, int(size_of_dice))\n dice.append(roll)\n\n # Checks wether the result needs to be sorted or not\n if self.sort is True:\n dice.sort()\n # Turns ints into strings after sorting\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n # Sets the last roll flag and returns to sort flag to false\n self.last_roll = converted_dice\n self.sort = False\n # Sets the last roll flag for easy cross function use.\n else:\n # Turns Ints into strings incase it had to be sorted\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice\n\n def calculate_roll(self):\n \"\"\"Function to calculate the sum of the roll\"\"\"\n # Takes all the numbers from the last roll and adds them up.\n for i in self.last_roll:\n self.result = int(self.result) + int(i)\n self.result = self.result + int(self.modifier + self.modifier_number)\n\n def handle_adv(self):\n \"\"\"Function that handles the optional advantage options\"\"\"\n\n # This part handles advantage so it takes the highest of the 2 numbers\n # and 
then drops the lowest number\n if self.adv == 'adv':\n # Checks wether the number that was input is not 1 or 2.\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) != '1':\n self.error = 'Can only roll advantage with 2 dice, ya dummy!'\n\n # Checks if number of dice was left blank so automatically set to 1\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) == '1':\n self.number_of_dice = 2\n\n # Checks if the number of dice is 2 before moving on\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n # Stores the dropped roll before deleting it from last_roll\n self.dropped_roll = self.last_roll[0]\n del self.last_roll[0]\n # Returns flag to default state\n self.adv = False\n\n # This part handles disadvantage so it takes the lowest of the 2\n # numbers and then drops the highest number\n if self.adv == 'dadv':\n # Checks wether the number that was input is not 1 or 2.\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) != '1':\n self.error = \\\n 'Can only roll disadvantage with 2 dice, ya dummy!'\n\n # Checks if number of dice was left blank so automatically set to 1\n if str(self.number_of_dice) != '2' and \\\n str(self.number_of_dice) == '1':\n self.number_of_dice = 2\n\n # Checks if the number of dice is 2 before moving on\n if str(self.number_of_dice) == '2':\n self.sort = True\n self.roll_dice(self.number_of_dice, self.size_of_dice)\n # Stores the dropped roll before deleting it from last_roll\n self.dropped_roll = self.last_roll[1]\n del self.last_roll[1]\n # Returns flag to default state\n self.adv = False\n\n def roll_stats(self):\n \"\"\"Lets you roll new stats for a char\"\"\"\n\n for stat in range(6):\n self.roll_input('4d6', 'sort')\n self.dropped_d_stats[stat] = self.last_roll[0]\n del self.last_roll[0]\n self.calculate_roll()\n self.result_d_stats[stat] = self.result\n self.d_stats[stat] = self.last_roll\n\n\n# The regex that looks 
through the input for key information.\ndice_handler = re.compile(r'(\\d*)([dD])(\\d*)(([+-])(\\d*))?')\nph = profile_handler.PHandler()\n",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('높이는', height, ' 밑변은', base, '사각형의 면적은', area, '입니다.')
<|reserved_special_token_1|>
height = int(input('높이 입력: '))
base = int(input('밑변 입력: '))
area = height * base
print('높이는', height, ' 밑변은', base, '사각형의 면적은', area, '입니다.')
<|reserved_special_token_1|>
# Program that computes the area of a rectangle.
# rectangle area = height * base
height=int(input('높이 입력: '))  # prompt (Korean): "enter height"
base=int(input('밑변 입력: '))  # prompt (Korean): "enter base"
area=height*base
print('높이는',height,' 밑변은',base,'사각형의 면적은',area,'입니다.')  # "height is .., base is .., area is .."
|
flexible
|
{
"blob_id": "f9b48c1b6489d8981e192838cf1c734e2296ab15",
"index": 9833,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('높이는', height, ' 밑변은', base, '사각형의 면적은', area, '입니다.')\n",
"step-3": "height = int(input('높이 입력: '))\nbase = int(input('밑변 입력: '))\narea = height * base\nprint('높이는', height, ' 밑변은', base, '사각형의 면적은', area, '입니다.')\n",
"step-4": "#사각형의 면적을 구하는 프로그램을 작성하시오,\r\n#사각형의 면적 = 높이*밑변\r\n\r\nheight=int(input('높이 입력: '))\r\nbase=int(input('밑변 입력: '))\r\n\r\narea=height*base\r\n\r\nprint('높이는',height,' 밑변은',base,'사각형의 면적은',area,'입니다.')\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding: utf-8
# In[2]:
from HSTLens_base_classifier_resnet17_s import BaseKerasClassifier
from keras.layers import Activation, AveragePooling2D, MaxPooling2D
from keras.layers import Conv2D, ELU, Dropout, LeakyReLU
from keras.layers.normalization import BatchNormalization
class deeplens_classifier(BaseKerasClassifier):
    """Convolutional feature extractor ("resnet17_scp" variant) for lens classification."""

    def _model_definition(self, net):
        """
        Builds the architecture of the network
        """
        # Report the incoming tensor shape early: this model is channels-first,
        # and keras silently assumes channels-last if data_format is omitted.
        print(net.shape)
        print('resnet17_scp')

        # Stage 1: 128 filters, 5x5 kernel; declares the (1, 100, 100)
        # channels-first input. Each stage is conv -> BN -> LeakyReLU -> 2x2 pool.
        net = Conv2D(filters=128, kernel_size=5, activation=None, padding='same',
                     data_format="channels_first", input_shape=(1, 100, 100))(net)
        net = BatchNormalization(axis=1)(net)  # axis 1 holds the colour channels
        net = LeakyReLU()(net)
        net = MaxPooling2D(pool_size=(2, 2))(net)

        # Stages 2 and 3: identical 3x3 conv stacks with 64 filters each.
        for n_filters in (64, 64):
            net = Conv2D(filters=n_filters, kernel_size=3, activation=None,
                         padding='same', data_format="channels_first")(net)
            net = BatchNormalization(axis=1)(net)  # axis 1 holds the colour channels
            net = LeakyReLU()(net)
            net = MaxPooling2D(pool_size=(2, 2))(net)

        return net
# In[ ]:
|
normal
|
{
"blob_id": "6bd47fb71a32b8383a75e72111d802008bc6bc68",
"index": 3350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass deeplens_classifier(BaseKerasClassifier):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass deeplens_classifier(BaseKerasClassifier):\n\n def _model_definition(self, net):\n \"\"\"\n Builds the architecture of the network\n \"\"\"\n print(net.shape)\n print('resnet17_scp')\n net = Conv2D(filters=128, kernel_size=5, activation=None, padding=\n 'same', data_format='channels_first', input_shape=(1, 100, 100))(\n net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n return net\n",
"step-4": "from HSTLens_base_classifier_resnet17_s import BaseKerasClassifier\nfrom keras.layers import Activation, AveragePooling2D, MaxPooling2D\nfrom keras.layers import Conv2D, ELU, Dropout, LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\n\n\nclass deeplens_classifier(BaseKerasClassifier):\n\n def _model_definition(self, net):\n \"\"\"\n Builds the architecture of the network\n \"\"\"\n print(net.shape)\n print('resnet17_scp')\n net = Conv2D(filters=128, kernel_size=5, activation=None, padding=\n 'same', data_format='channels_first', input_shape=(1, 100, 100))(\n net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n return net\n",
"step-5": "\n# coding: utf-8\n\n# In[2]:\n\n\n\nfrom HSTLens_base_classifier_resnet17_s import BaseKerasClassifier\n\nfrom keras.layers import Activation, AveragePooling2D, MaxPooling2D\nfrom keras.layers import Conv2D, ELU, Dropout, LeakyReLU\n\nfrom keras.layers.normalization import BatchNormalization\n\nclass deeplens_classifier(BaseKerasClassifier):\n\n def _model_definition(self, net):\n \"\"\"\n Builds the architecture of the network\n \"\"\"\n \n # Input filtering and downsampling with max pooling\n print(net.shape) #channels must be specified first otherwise keras assumes channels last\n print('resnet17_scp')\n \n net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same', \n data_format=\"channels_first\", input_shape=(1, 100, 100))(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels \n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n\n\n \n return net\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class SampleForm(forms.ModelForm):
class Meta:
model = Sample
fields = ['name', 'alias', 'sample_type', 'description', 'project',
'author', 'sequence', 'length', 'genbank', 'source_reference',
'comments', 'parent_id', 'organism', 'genus_specie', 'marker',
'application', 'strategy', 'seq_verified', 'origin_rep',
'cloning_system', 'strand', 'order_number', 'part_type',
'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',
'tm']
class PlateForm(forms.ModelForm):
class Meta:
model = Plate
fields = ['name', 'barcode', 'type', 'contents', 'location',
'num_cols', 'num_rows', 'num_well', 'function', 'project',
'active', 'status']
class WellForm(forms.ModelForm):
class Meta:
model = Well
fields = ['name', 'volume', 'concentration', 'plate', 'samples',
'active', 'status']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['name', 'author', 'collaborators', 'status', 'comments']
class FileForm(forms.ModelForm):
class Meta:
model = File
fields = ['name', 'script', 'author', 'file']
class SampleForm(forms.ModelForm):
class Meta:
model = Sample
fields = ['name', 'alias', 'sample_type', 'description', 'project',
'author', 'sequence', 'length', 'genbank', 'source_reference',
'comments', 'parent_id', 'organism', 'genus_specie', 'marker',
'application', 'strategy', 'seq_verified', 'origin_rep',
'cloning_system', 'strand', 'order_number', 'part_type',
'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',
'tm']
class PlateForm(forms.ModelForm):
class Meta:
model = Plate
fields = ['name', 'barcode', 'type', 'contents', 'location',
'num_cols', 'num_rows', 'num_well', 'function', 'project',
'active', 'status']
class WellForm(forms.ModelForm):
class Meta:
model = Well
fields = ['name', 'volume', 'concentration', 'plate', 'samples',
'active', 'status']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MachineForm(forms.ModelForm):
class Meta:
model = Machine
fields = ['name', 'author', 'status', 'comments']
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['name', 'author', 'collaborators', 'status', 'comments']
class FileForm(forms.ModelForm):
class Meta:
model = File
fields = ['name', 'script', 'author', 'file']
class SampleForm(forms.ModelForm):
class Meta:
model = Sample
fields = ['name', 'alias', 'sample_type', 'description', 'project',
'author', 'sequence', 'length', 'genbank', 'source_reference',
'comments', 'parent_id', 'organism', 'genus_specie', 'marker',
'application', 'strategy', 'seq_verified', 'origin_rep',
'cloning_system', 'strand', 'order_number', 'part_type',
'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',
'tm']
class PlateForm(forms.ModelForm):
class Meta:
model = Plate
fields = ['name', 'barcode', 'type', 'contents', 'location',
'num_cols', 'num_rows', 'num_well', 'function', 'project',
'active', 'status']
class WellForm(forms.ModelForm):
class Meta:
model = Well
fields = ['name', 'volume', 'concentration', 'plate', 'samples',
'active', 'status']
<|reserved_special_token_1|>
from django import forms
from .models import File, Sample, Plate, Well, Machine, Project
class MachineForm(forms.ModelForm):
    """ModelForm for creating/editing Machine records."""

    class Meta:
        model = Machine
        fields = ['name', 'author', 'status', 'comments']
class ProjectForm(forms.ModelForm):
    """ModelForm for creating/editing Project records."""

    class Meta:
        model = Project
        fields = ['name', 'author', 'collaborators', 'status', 'comments']
class FileForm(forms.ModelForm):
    """ModelForm for uploading/editing File records."""

    class Meta:
        model = File
        fields = ['name', 'script', 'author', 'file']
class SampleForm(forms.ModelForm):
    """ModelForm covering the full Sample model, including sequence,
    cloning and primer metadata fields."""

    class Meta:
        model = Sample
        fields = ['name', 'alias', 'sample_type', 'description', 'project',
            'author', 'sequence', 'length', 'genbank', 'source_reference',
            'comments', 'parent_id', 'organism', 'genus_specie', 'marker',
            'application', 'strategy', 'seq_verified', 'origin_rep',
            'cloning_system', 'strand', 'order_number', 'part_type',
            'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',
            'tm']
class PlateForm(forms.ModelForm):
    """ModelForm for creating/editing Plate records (layout and status)."""

    class Meta:
        model = Plate
        fields = ['name', 'barcode', 'type', 'contents', 'location',
            'num_cols', 'num_rows', 'num_well', 'function', 'project',
            'active', 'status']
class WellForm(forms.ModelForm):
    """ModelForm for creating/editing Well records within a Plate."""

    class Meta:
        model = Well
        fields = ['name', 'volume', 'concentration', 'plate', 'samples',
            'active', 'status']
|
flexible
|
{
"blob_id": "5bb894feaf9293bf70b3f831e33be555f74efde8",
"index": 6901,
"step-1": "<mask token>\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-2": "<mask token>\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-3": "<mask token>\n\n\nclass MachineForm(forms.ModelForm):\n\n\n class Meta:\n model = Machine\n fields = ['name', 'author', 'status', 'comments']\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-4": "from django import forms\nfrom .models import File, Sample, Plate, Well, Machine, Project\n\n\nclass MachineForm(forms.ModelForm):\n\n\n class Meta:\n model = Machine\n fields = ['name', 'author', 'status', 'comments']\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
class ListNode:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class ListNode:
def __init__(self, val: int, next=None):
self.val = val
self.next = next
<|reserved_special_token_0|>
def print_list(head: ListNode):
node = head
while node:
print(str(node.val) + '-->')
node = node.next
print('---end--')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class ListNode:
def __init__(self, val: int, next=None):
self.val = val
self.next = next
def reverseKGroup(head: ListNode, k: int) ->ListNode:
prev, cur, rs, successor = None, head, head, None
def reverseK(node: ListNode, count: int) ->ListNode:
nonlocal successor
nonlocal prev
if count + 1 == k:
successor = node.next
return node
first = reverseK(node.next, count + 1)
node.next.next = node
node.next = successor
if prev:
prev.next = first
return first
index = 1
while cur:
if index % k == 0:
sub_head = reverseK(rs, 0)
prev = rs
if index == k:
head = sub_head
rs, cur = successor, successor
else:
cur = cur.next
index += 1
return head
def print_list(head: ListNode):
node = head
while node:
print(str(node.val) + '-->')
node = node.next
print('---end--')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class ListNode:
def __init__(self, val: int, next=None):
self.val = val
self.next = next
def reverseKGroup(head: ListNode, k: int) ->ListNode:
prev, cur, rs, successor = None, head, head, None
def reverseK(node: ListNode, count: int) ->ListNode:
nonlocal successor
nonlocal prev
if count + 1 == k:
successor = node.next
return node
first = reverseK(node.next, count + 1)
node.next.next = node
node.next = successor
if prev:
prev.next = first
return first
index = 1
while cur:
if index % k == 0:
sub_head = reverseK(rs, 0)
prev = rs
if index == k:
head = sub_head
rs, cur = successor, successor
else:
cur = cur.next
index += 1
return head
def print_list(head: ListNode):
node = head
while node:
print(str(node.val) + '-->')
node = node.next
print('---end--')
if __name__ == '__main__':
five = ListNode(5)
four = ListNode(4, five)
three = ListNode(3, four)
two = ListNode(2, three)
one = ListNode(1, two)
reversed_node = reverseKGroup(one, 5)
print_list(reversed_node)
<|reserved_special_token_1|>
class ListNode:
    """Singly linked list node holding an integer value and a next pointer."""

    def __init__(self, val: int, next=None):
        # Assign both fields in a single tuple assignment.
        self.val, self.next = val, next
def reverseKGroup(head: ListNode, k: int) -> ListNode:
    """Reverse the list in consecutive groups of k nodes.

    A trailing partial group (fewer than k nodes) is left untouched.
    Returns the new head of the list.
    """
    # prev: tail of the previous (already reversed) group.
    # group_start: first node of the group currently being scanned.
    # next_group: node that follows the group being reversed.
    prev, walker, group_start, next_group = None, head, head, None

    def _flip(node: ListNode, depth: int) -> ListNode:
        # Recursively reverse exactly k nodes starting at `node`.
        # On the deepest call, remember the node after the group so the
        # unwinding levels can stitch their `next` pointers to it.
        nonlocal next_group
        nonlocal prev
        if depth + 1 == k:
            next_group = node.next
            return node
        new_head = _flip(node.next, depth + 1)
        node.next.next = node
        node.next = next_group
        if prev:
            # Re-link the previous group's tail to this group's new head.
            prev.next = new_head
        return new_head

    pos = 1
    while walker:
        if pos % k == 0:
            # A full group has been scanned: reverse it in place.
            group_head = _flip(group_start, 0)
            prev = group_start
            if pos == k:
                # First group reversed — its head becomes the list head.
                head = group_head
            group_start = next_group
            walker = next_group
        else:
            walker = walker.next
        pos += 1
    return head
def print_list(head: ListNode):
    """Print each node's value followed by '-->', then a terminator line."""
    cursor = head
    while cursor is not None:
        print(f'{cursor.val}-->')
        cursor = cursor.next
    print('---end--')
if __name__ == '__main__':
    # Build the list 1 -> 2 -> 3 -> 4 -> 5 as one nested constructor chain,
    # reverse it as a single group of five, and print the result (5..1).
    one = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    reversed_node = reverseKGroup(one, 5)
    print_list(reversed_node)
|
flexible
|
{
"blob_id": "67904f3a29b0288a24e702f9c3ee001ebc279748",
"index": 3542,
"step-1": "class ListNode:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class ListNode:\n\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\n<mask token>\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\n<mask token>\n",
"step-3": "class ListNode:\n\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\ndef reverseKGroup(head: ListNode, k: int) ->ListNode:\n prev, cur, rs, successor = None, head, head, None\n\n def reverseK(node: ListNode, count: int) ->ListNode:\n nonlocal successor\n nonlocal prev\n if count + 1 == k:\n successor = node.next\n return node\n first = reverseK(node.next, count + 1)\n node.next.next = node\n node.next = successor\n if prev:\n prev.next = first\n return first\n index = 1\n while cur:\n if index % k == 0:\n sub_head = reverseK(rs, 0)\n prev = rs\n if index == k:\n head = sub_head\n rs, cur = successor, successor\n else:\n cur = cur.next\n index += 1\n return head\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\n<mask token>\n",
"step-4": "class ListNode:\n\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\ndef reverseKGroup(head: ListNode, k: int) ->ListNode:\n prev, cur, rs, successor = None, head, head, None\n\n def reverseK(node: ListNode, count: int) ->ListNode:\n nonlocal successor\n nonlocal prev\n if count + 1 == k:\n successor = node.next\n return node\n first = reverseK(node.next, count + 1)\n node.next.next = node\n node.next = successor\n if prev:\n prev.next = first\n return first\n index = 1\n while cur:\n if index % k == 0:\n sub_head = reverseK(rs, 0)\n prev = rs\n if index == k:\n head = sub_head\n rs, cur = successor, successor\n else:\n cur = cur.next\n index += 1\n return head\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\nif __name__ == '__main__':\n five = ListNode(5)\n four = ListNode(4, five)\n three = ListNode(3, four)\n two = ListNode(2, three)\n one = ListNode(1, two)\n reversed_node = reverseKGroup(one, 5)\n print_list(reversed_node)\n",
"step-5": "class ListNode:\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\ndef reverseKGroup(head: ListNode, k: int) -> ListNode:\n prev, cur, rs, successor = None, head, head, None\n\n def reverseK(node: ListNode, count: int) -> ListNode:\n nonlocal successor\n nonlocal prev\n\n if count + 1 == k:\n successor = node.next\n return node\n\n first = reverseK(node.next, count + 1)\n\n node.next.next = node\n node.next = successor\n if prev: prev.next = first\n return first\n\n index = 1\n while cur:\n if index % k == 0:\n sub_head = reverseK(rs, 0)\n prev = rs\n if index == k: head = sub_head\n rs, cur = successor, successor\n else:\n cur = cur.next\n index += 1\n return head\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\nif __name__ == '__main__':\n five = ListNode(5)\n four = ListNode(4, five)\n three = ListNode(3, four)\n two = ListNode(2, three)\n one = ListNode(1, two)\n # print_list(one)\n reversed_node = reverseKGroup(one, 5)\n print_list(reversed_node)\n\n\n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for x in range(0, len(lines)):
for x1, y1, x2, y2 in lines[x]:
cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)
<|reserved_special_token_0|>
plt.subplot(131), plt.imshow(img_noblur, cmap='gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel'), plt.xticks([]), plt.yticks([])
plt.subplot(133), plt.imshow(imgnew, cmap='gray')
plt.title('Output'), plt.xticks([]), plt.yticks([])
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img_noblur = cv2.imread('road8.jpg')
imgnew = img_noblur.copy()
img_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)
sobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)
sobelx[sobelx < 100] = 0
lines = cv2.HoughLinesP(sobelx, 1, np.pi / 180, 100)
for x in range(0, len(lines)):
for x1, y1, x2, y2 in lines[x]:
cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)
imgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)
img_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)
plt.subplot(131), plt.imshow(img_noblur, cmap='gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel'), plt.xticks([]), plt.yticks([])
plt.subplot(133), plt.imshow(imgnew, cmap='gray')
plt.title('Output'), plt.xticks([]), plt.yticks([])
plt.show()
<|reserved_special_token_1|>
import cv2
import numpy as np
import matplotlib.pyplot as plt
img_noblur = cv2.imread('road8.jpg')
imgnew = img_noblur.copy()
img_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)
sobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)
sobelx[sobelx < 100] = 0
lines = cv2.HoughLinesP(sobelx, 1, np.pi / 180, 100)
for x in range(0, len(lines)):
for x1, y1, x2, y2 in lines[x]:
cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)
imgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)
img_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)
plt.subplot(131), plt.imshow(img_noblur, cmap='gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel'), plt.xticks([]), plt.yticks([])
plt.subplot(133), plt.imshow(imgnew, cmap='gray')
plt.title('Output'), plt.xticks([]), plt.yticks([])
plt.show()
<|reserved_special_token_1|>
import cv2                                      # OpenCV: image I/O, filtering, Hough transform
import numpy as np                              # numerical helpers (pi for Hough angle resolution)
import matplotlib.pyplot as plt                 # plotting the three result panels

# Road-marker detection: grayscale -> Gaussian blur -> horizontal Sobel
# edges -> probabilistic Hough line transform -> draw detected lines.

img_noblur = cv2.imread('road8.jpg')            # reads the image (BGR); None if the file is missing
if img_noblur is None:
    raise FileNotFoundError("road8.jpg could not be read")
imgnew = img_noblur.copy()                      # copy to draw the detected lines on
img_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)  # converts from BGR to grayscale
img = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)  # smooth to suppress noise before edge detection

sobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)      # horizontal Sobel kernel of size 3
sobelx[sobelx < 100] = 0                        # discard low-intensity edge pixels

# HoughLinesP returns None (not an empty array) when nothing is detected,
# so guard before iterating.
lines = cv2.HoughLinesP(sobelx, 1, np.pi / 180, 100)
if lines is not None:
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)  # draw each detected line in green

imgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)            # BGR -> RGB for matplotlib display
img_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)    # same for the original image

plt.subplot(131), plt.imshow(img_noblur, cmap='gray')       # original image
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132), plt.imshow(sobelx, cmap='gray')           # Sobel edge response
plt.title('Sobel'), plt.xticks([]), plt.yticks([])
plt.subplot(133), plt.imshow(imgnew, cmap='gray')           # result with road markers drawn
plt.title('Output'), plt.xticks([]), plt.yticks([])

plt.show()                                      # display the figure
|
flexible
|
{
"blob_id": "7b4f46f6c286a7d0ef45079b2fd238b81d5f89eb",
"index": 3493,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in range(0, len(lines)):\n for x1, y1, x2, y2 in lines[x]:\n cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)\n<mask token>\nplt.subplot(131), plt.imshow(img_noblur, cmap='gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132), plt.imshow(sobelx, cmap='gray')\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133), plt.imshow(imgnew, cmap='gray')\nplt.title('Output'), plt.xticks([]), plt.yticks([])\nplt.show()\n",
"step-3": "<mask token>\nimg_noblur = cv2.imread('road8.jpg')\nimgnew = img_noblur.copy()\nimg_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)\nimg = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)\nsobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)\nsobelx[sobelx < 100] = 0\nlines = cv2.HoughLinesP(sobelx, 1, np.pi / 180, 100)\nfor x in range(0, len(lines)):\n for x1, y1, x2, y2 in lines[x]:\n cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)\nimgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)\nimg_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)\nplt.subplot(131), plt.imshow(img_noblur, cmap='gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132), plt.imshow(sobelx, cmap='gray')\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133), plt.imshow(imgnew, cmap='gray')\nplt.title('Output'), plt.xticks([]), plt.yticks([])\nplt.show()\n",
"step-4": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimg_noblur = cv2.imread('road8.jpg')\nimgnew = img_noblur.copy()\nimg_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)\nimg = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)\nsobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)\nsobelx[sobelx < 100] = 0\nlines = cv2.HoughLinesP(sobelx, 1, np.pi / 180, 100)\nfor x in range(0, len(lines)):\n for x1, y1, x2, y2 in lines[x]:\n cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)\nimgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)\nimg_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)\nplt.subplot(131), plt.imshow(img_noblur, cmap='gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132), plt.imshow(sobelx, cmap='gray')\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133), plt.imshow(imgnew, cmap='gray')\nplt.title('Output'), plt.xticks([]), plt.yticks([])\nplt.show()\n",
"step-5": "import cv2\t\t\t\t\t\t\t\t\t\t#imports cv2 package\nimport numpy as np \t\t\t\t\t\t\t\t#imports numpy package\nimport matplotlib.pyplot as plt \t\t\t\t#imports matplotlib.pyplot package\n\nimg_noblur = cv2.imread('road8.jpg')\t\t\t#reads the image\nimgnew = img_noblur.copy()\t\t\t\t\t\t#creates a copy of the image\nimg_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)\t#converts the image from BGR to Grayscale\nimg = cv2.GaussianBlur(img_noblur_grey,(5,5),0)\t#applies a Gaussian Blur to the image for smoothing\n\nsobelx = cv2.Sobel(img,-1,1,0,ksize=3)\t\t\t#applies Sobel horizontal kernel of size 3 to the image\nsobelx[sobelx<100] = 0\t\t\t\t\t\t\t#discards low intensity pixels\n\nlines = cv2.HoughLinesP(sobelx,1,np.pi/180,100)\t#use HoughLinesP to detect lines in the image to which Sobel horizontal kernel was applied\nfor x in range(0, len(lines)):\n for x1,y1,x2,y2 in lines[x]:\n cv2.line(imgnew,(x1,y1),(x2,y2),(0,255,0),5)\t\t#draws the detected lines on the image\n\nimgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)\t\t\t#converts the image from BGR to RGB\nimg_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)\t#converts the original image from BGR to RGB for display\n\nplt.subplot(131),plt.imshow(img_noblur,cmap = 'gray')\t\t#plots the original image\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(132),plt.imshow(sobelx,cmap = 'gray')\t\t\t#plots the result of applying Sobel horizontal kernel to the image\nplt.title('Sobel'), plt.xticks([]), plt.yticks([])\nplt.subplot(133),plt.imshow(imgnew,cmap = 'gray')\t\t\t#plots the result with the road markers detected\nplt.title('Output'), plt.xticks([]), plt.yticks([])\n\nplt.show()\t\t\t\t\t\t\t\t\t\t\t#displays the figure",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, Collection, Container, Iterable, Sequence
from flask import g
from sqlalchemy import or_, select
from sqlalchemy.orm import joinedload
from airflow.auth.managers.fab.models import Permission, Resource, Role, User
from airflow.auth.managers.fab.views.permissions import (
ActionModelView,
PermissionPairModelView,
ResourceModelView,
)
from airflow.auth.managers.fab.views.roles_list import CustomRoleModelView
from airflow.auth.managers.fab.views.user import (
CustomUserDBModelView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
)
from airflow.auth.managers.fab.views.user_edit import (
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomUserInfoEditView,
)
from airflow.auth.managers.fab.views.user_stats import CustomUserStatsChartView
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.www.extensions.init_auth_manager import get_auth_manager
from airflow.www.fab_security.sqla.manager import SecurityManager
from airflow.www.utils import CustomSQLAInterface
# Names of the default roles shipped with Airflow; any role found in the DB
# that is NOT in this set is treated as a user-created ("custom") role.
EXISTING_ROLES = {
    "Admin",
    "Viewer",
    "User",
    "Op",
    "Public",
}
if TYPE_CHECKING:
    from sqlalchemy.orm import Session
    # At type-check time there is no running app, so the override is plain ``object``.
    SecurityManagerOverride: type = object
else:
    # Fetch the security manager override from the auth manager
    SecurityManagerOverride = get_auth_manager().get_security_manager_override_class()
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager, LoggingMixin):
    """Custom security manager, which introduces a permission model adapted to Airflow.

    Permissions are (action, resource) pairs; the class-level lists below are
    cumulative: User = Viewer + USER_PERMISSIONS, Op = User + OP_PERMISSIONS, etc.
    """
    ###########################################################################
    # PERMISSIONS
    ###########################################################################
    # [START security_viewer_perms]
    # Read-only access to most UI resources, plus editing one's own profile/password.
    VIEWER_PERMISSIONS = [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
    ]
    # [END security_viewer_perms]
    # [START security_user_perms]
    # Additional write access to DAGs, task instances and DAG runs.
    USER_PERMISSIONS = [
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
    ]
    # [END security_user_perms]
    # [START security_op_perms]
    # Operational access: config, connections, pools, variables, XComs.
    OP_PERMISSIONS = [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
    ]
    # [END security_op_perms]
    # Admin-only extras: task reschedules, triggers, passwords and roles.
    ADMIN_PERMISSIONS = [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
    ]
    # global resource for dag-level access
    DAG_RESOURCES = {permissions.RESOURCE_DAG}
    # The actions that may be applied to individual DAG resources.
    DAG_ACTIONS = permissions.DAG_ACTIONS
    ###########################################################################
    # DEFAULT ROLE CONFIGURATIONS
    ###########################################################################
    # Default role -> permission-set mapping consumed by ``sync_roles``.
    ROLE_CONFIGS: list[dict[str, Any]] = [
        {"role": "Public", "perms": []},
        {"role": "Viewer", "perms": VIEWER_PERMISSIONS},
        {
            "role": "User",
            "perms": VIEWER_PERMISSIONS + USER_PERMISSIONS,
        },
        {
            "role": "Op",
            "perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
        },
        {
            "role": "Admin",
            "perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
        },
    ]
    # Airflow's customized FAB views, passed to the base manager in __init__.
    actionmodelview = ActionModelView
    permissionmodelview = PermissionPairModelView
    rolemodelview = CustomRoleModelView
    resourcemodelview = ResourceModelView
    userdbmodelview = CustomUserDBModelView
    resetmypasswordview = CustomResetMyPasswordView
    resetpasswordview = CustomResetPasswordView
    userinfoeditview = CustomUserInfoEditView
    userldapmodelview = CustomUserLDAPModelView
    useroauthmodelview = CustomUserOAuthModelView
    userremoteusermodelview = CustomUserRemoteUserModelView
    useroidmodelview = CustomUserOIDModelView
    userstatschartview = CustomUserStatsChartView
    def __init__(self, appbuilder) -> None:
        """Wire Airflow's customized FAB views into the base security manager.

        :param appbuilder: the Flask-AppBuilder instance of the web application.
        """
        super().__init__(
            appbuilder=appbuilder,
            actionmodelview=self.actionmodelview,
            authdbview=self.authdbview,
            authldapview=self.authldapview,
            authoauthview=self.authoauthview,
            authoidview=self.authoidview,
            authremoteuserview=self.authremoteuserview,
            permissionmodelview=self.permissionmodelview,
            registeruser_view=self.registeruser_view,
            registeruserdbview=self.registeruserdbview,
            registeruseroauthview=self.registeruseroauthview,
            registerusermodelview=self.registerusermodelview,
            registeruseroidview=self.registeruseroidview,
            resetmypasswordview=self.resetmypasswordview,
            resetpasswordview=self.resetpasswordview,
            rolemodelview=self.rolemodelview,
            user_model=self.user_model,
            userinfoeditview=self.userinfoeditview,
            userdbmodelview=self.userdbmodelview,
            userldapmodelview=self.userldapmodelview,
            useroauthmodelview=self.useroauthmodelview,
            useroidmodelview=self.useroidmodelview,
            userremoteusermodelview=self.userremoteusermodelview,
            userstatschartview=self.userstatschartview,
        )
        # Go and fix up the SQLAInterface used from the stock one to our subclass.
        # This is needed to support the "hack" where we had to edit
        # FieldConverter.conversion_table in place in airflow.www.utils
        for attr in dir(self):
            if not attr.endswith("view"):
                continue
            view = getattr(self, attr, None)
            if not view or not getattr(view, "datamodel", None):
                continue
            view.datamodel = CustomSQLAInterface(view.datamodel.obj)
        self.perms = None  # populated lazily elsewhere; None means "not loaded yet"
def _get_root_dag_id(self, dag_id: str) -> str:
if "." in dag_id:
dm = self.appbuilder.get_session.execute(
select(DagModel.dag_id, DagModel.root_dag_id).where(DagModel.dag_id == dag_id)
).one()
return dm.root_dag_id or dm.dag_id
return dag_id
def init_role(self, role_name, perms) -> None:
"""
Initialize the role with actions and related resources.
:param role_name:
:param perms:
:return:
"""
warnings.warn(
"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.bulk_sync_roles([{"role": role_name, "perms": perms}])
    def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:
        """Sync the provided roles and permissions.

        Each item is a dict with a ``"role"`` name and a ``"perms"`` list of
        (action_name, resource_name) tuples; missing roles and permissions are
        created and linked as needed.
        """
        # Pre-fetch existing rows once so the loop below avoids per-item queries.
        existing_roles = self._get_all_roles_with_permissions()
        non_dag_perms = self._get_all_non_dag_permissions()
        for config in roles:
            role_name = config["role"]
            perms = config["perms"]
            role = existing_roles.get(role_name) or self.add_role(role_name)
            for action_name, resource_name in perms:
                perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(
                    action_name, resource_name
                )
                if perm not in role.permissions:
                    self.add_permission_to_role(role, perm)
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
    def get_readable_dags(self, user) -> Iterable[DagModel]:
        """Gets the DAGs readable by authenticated user (deprecated)."""
        warnings.warn(
            "`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        with warnings.catch_warnings():
            # get_accessible_dags is itself deprecated; the caller has already
            # been warned above, so silence the duplicate warning here.
            warnings.simplefilter("ignore", RemovedInAirflow3Warning)
            return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
    def get_editable_dags(self, user) -> Iterable[DagModel]:
        """Gets the DAGs editable by authenticated user (deprecated)."""
        warnings.warn(
            "`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        with warnings.catch_warnings():
            # Suppress the nested deprecation warning from get_accessible_dags.
            warnings.simplefilter("ignore", RemovedInAirflow3Warning)
            return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
    @provide_session
    def get_accessible_dags(
        self,
        user_actions: Container[str] | None,
        user,
        session: Session = NEW_SESSION,
    ) -> Iterable[DagModel]:
        """Return the DagModel rows accessible to *user* (deprecated)."""
        warnings.warn(
            "`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.",
            RemovedInAirflow3Warning,
            stacklevel=3,
        )
        # Delegate to the id-based API, then hydrate the matching DagModel rows.
        dag_ids = self.get_accessible_dag_ids(user, user_actions, session)
        return session.scalars(select(DagModel).where(DagModel.dag_id.in_(dag_ids)))
def get_readable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
    @provide_session
    def get_accessible_dag_ids(
        self,
        user,
        user_actions: Container[str] | None = None,
        session: Session = NEW_SESSION,
    ) -> set[str]:
        """Generic function to get readable or writable DAGs for user.

        :param user: the user whose access is evaluated.
        :param user_actions: actions to test for; defaults to can_edit + can_read.
        :param session: SQLAlchemy session (injected by ``@provide_session``).
        :return: set of accessible dag_ids.
        """
        if not user_actions:
            user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
        if not get_auth_manager().is_logged_in():
            roles = user.roles
        else:
            # Short-circuit: a global permission on the DAG resource grants every DAG.
            if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (
                permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)
            ):
                return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
            # Eager-load roles -> permissions -> action/resource in a single query.
            user_query = session.scalar(
                select(User)
                .options(
                    joinedload(User.roles)
                    .subqueryload(Role.permissions)
                    .options(joinedload(Permission.action), joinedload(Permission.resource))
                )
                .where(User.id == user.id)
            )
            roles = user_query.roles
        resources = set()
        for role in roles:
            for permission in role.permissions:
                action = permission.action.name
                if action not in user_actions:
                    continue
                resource = permission.resource.name
                if resource == permissions.RESOURCE_DAG:
                    # Global DAG resource: every dag_id is accessible.
                    return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
                if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
                    # Strip the per-DAG resource prefix to recover the dag_id.
                    resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
                else:
                    resources.add(resource)
        # Keep only names that actually correspond to existing DAGs.
        return {
            dag.dag_id
            for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))
        }
def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != "~":
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)
def can_edit_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)
def can_delete_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)
    def prefixed_dag_id(self, dag_id: str) -> str:
        """Returns the permission (resource) name for a DAG id.

        Deprecated: use ``airflow.security.permissions.resource_name_for_dag``.
        """
        warnings.warn(
            "`prefixed_dag_id` has been deprecated. "
            "Please use `airflow.security.permissions.resource_name_for_dag` instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        root_dag_id = self._get_root_dag_id(dag_id)
        return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) -> bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None) -> bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
:param action_name: action_name on resource (e.g can_read, can_edit).
:param resource_name: name of view-menu or resource.
:param user: user name
:return: Whether user could perform certain action on the resource.
:rtype bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
def _has_role(self, role_name_or_list: Container, user) -> bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) -> bool:
"""
Has all the dag access in any of the 3 cases.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return (
self._has_role(["Admin", "Viewer", "Op", "User"], user)
or self.can_read_all_dags(user)
or self.can_edit_all_dags(user)
)
def can_edit_all_dags(self, user=None) -> bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) -> bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
    def clean_perms(self) -> None:
        """FAB leaves faulty permissions that need to be cleaned up."""
        self.log.debug("Cleaning faulty perms")
        sesh = self.appbuilder.get_session
        # A permission row is faulty when its action or resource FK is NULL.
        perms = sesh.query(Permission).filter(
            or_(
                Permission.action == None,  # noqa
                Permission.resource == None,  # noqa
            )
        )
        # Since FAB doesn't define ON DELETE CASCADE on these tables, we need
        # to delete the _object_ so that SQLA knows to delete the many-to-many
        # relationship object too. :(
        deleted_count = 0
        for perm in perms:
            sesh.delete(perm)
            deleted_count += 1
        sesh.commit()
        if deleted_count:
            self.log.info("Deleted %s faulty permissions", deleted_count)
    def _merge_perm(self, action_name: str, resource_name: str) -> None:
        """
        Add the new (action, resource) to assoc_permission_role if it doesn't exist.

        It will add the related entry to ab_permission and ab_resource two meta tables as well.

        :param action_name: Name of the action
        :param resource_name: Name of the resource
        :return: None
        """
        action = self.get_action(action_name)
        resource = self.get_resource(resource_name)
        perm = None
        # Only query for an existing permission when both sides already exist.
        if action and resource:
            perm = self.appbuilder.get_session.scalar(
                select(self.permission_model).filter_by(action=action, resource=resource).limit(1)
            )
        # create_permission creates missing action/resource rows as a side effect.
        if not perm and action_name and resource_name:
            self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self) -> None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
    def get_all_permissions(self) -> set[tuple[str, str]]:
        """Returns all permissions as a set of tuples with the action and resource names."""
        # Join the permission table to its action and resource rows so the
        # result is plain (action_name, resource_name) tuples.
        return set(
            self.appbuilder.get_session.execute(
                select(self.action_model.name, self.resource_model.name)
                .join(self.permission_model.action)
                .join(self.permission_model.resource)
            )
        )
    def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:
        """
        Get permissions except those that are for specific DAGs.

        Returns a dict with a key of (action_name, resource_name) and value of permission
        with all permissions except those that are for specific DAGs.
        """
        return {
            (action_name, resource_name): viewmodel
            for action_name, resource_name, viewmodel in (
                self.appbuilder.get_session.execute(
                    select(self.action_model.name, self.resource_model.name, self.permission_model)
                    .join(self.permission_model.action)
                    .join(self.permission_model.resource)
                    # Per-DAG resources are named with the "DAG:" prefix; exclude them.
                    .where(~self.resource_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
                )
            )
        }
def _get_all_roles_with_permissions(self) -> dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {
r.name: r
for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.permissions))
).unique()
}
    def create_dag_specific_permissions(self) -> None:
        """
        Add permissions to all DAGs.

        Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
        DAGs, along with any `access_control` permissions provided in them.

        This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
        if you only need to sync a single DAG.

        :return: None.
        """
        perms = self.get_all_permissions()
        dagbag = DagBag(read_dags_from_db=True)
        dagbag.collect_dags_from_db()
        dags = dagbag.dags.values()
        for dag in dags:
            # SubDAGs share the permissions of their parent (root) DAG.
            root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
            dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
            for action_name in self.DAG_ACTIONS:
                if (action_name, dag_resource_name) not in perms:
                    self._merge_perm(action_name, dag_resource_name)
            if dag.access_control:
                self.sync_perm_for_dag(dag_resource_name, dag.access_control)
    def update_admin_permission(self) -> None:
        """
        Add missing permissions to the table for admin.

        Admin should get all the permissions, except the dag permissions
        because Admin already has Dags permission.
        Add the missing ones to the table for admin.

        :return: None.
        """
        session = self.appbuilder.get_session
        dag_resources = session.scalars(
            select(Resource).where(Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
        )
        resource_ids = [resource.id for resource in dag_resources]
        # All permissions NOT attached to a per-DAG resource.
        perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))
        # Drop faulty rows (missing action or resource) before granting.
        perms = [p for p in perms if p.action and p.resource]
        admin = self.find_role("Admin")
        # Union with the existing grants so nothing is duplicated or lost.
        admin.permissions = list(set(admin.permissions) | set(perms))
        session.commit()
    def sync_roles(self) -> None:
        """
        Initialize default and custom roles with related permissions.

        1. Init the default role(Admin, Viewer, User, Op, public)
           with related permissions.
        2. Init the custom role(dag-user) with related permissions.

        :return: None.
        """
        # Create global all-dag permissions
        self.create_perm_vm_for_all_dag()
        # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
        self.bulk_sync_roles(self.ROLE_CONFIGS)
        self.add_homepage_access_to_custom_roles()
        # init existing roles, the rest role could be created through UI.
        self.update_admin_permission()
        self.clean_perms()
def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
    def sync_perm_for_dag(
        self,
        dag_id: str,
        access_control: dict[str, Collection[str]] | None = None,
    ) -> None:
        """
        Sync permissions for given dag id.

        The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.

        :param dag_id: the ID of the DAG whose permissions should be updated
        :param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g., {'can_read'})
        :return: None.
        """
        dag_resource_name = permissions.resource_name_for_dag(dag_id)
        # Always make sure the standard per-DAG permissions exist.
        for dag_action_name in self.DAG_ACTIONS:
            self.create_permission(dag_action_name, dag_resource_name)
        if access_control is not None:
            self.log.info("Syncing DAG-level permissions for DAG '%s'", dag_resource_name)
            self._sync_dag_view_permissions(dag_resource_name, access_control)
        else:
            self.log.info(
                "Not syncing DAG-level permissions for DAG '%s' as access control is unset.",
                dag_resource_name,
            )
    def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:
        """
        Set the access policy on the given DAG's ViewModel.

        :param dag_id: the ID of the DAG whose permissions should be updated
        :param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g. {'can_read'})
        """
        dag_resource_name = permissions.resource_name_for_dag(dag_id)

        def _get_or_create_dag_permission(action_name: str) -> Permission | None:
            # Fetch the per-DAG permission, creating it on first use.
            perm = self.get_permission(action_name, dag_resource_name)
            if not perm:
                self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
                perm = self.create_permission(action_name, dag_resource_name)
            return perm

        def _revoke_stale_permissions(resource: Resource):
            # Remove grants (from non-Admin roles) no longer listed in access_control.
            existing_dag_perms = self.get_resource_permissions(resource)
            for perm in existing_dag_perms:
                non_admin_roles = [role for role in perm.role if role.name != "Admin"]
                for role in non_admin_roles:
                    target_perms_for_role = access_control.get(role.name, ())
                    if perm.action.name not in target_perms_for_role:
                        self.log.info(
                            "Revoking '%s' on DAG '%s' for role '%s'",
                            perm.action,
                            dag_resource_name,
                            role.name,
                        )
                        self.remove_permission_from_role(role, perm)

        resource = self.get_resource(dag_resource_name)
        if resource:
            _revoke_stale_permissions(resource)
        for rolename, action_names in access_control.items():
            role = self.find_role(rolename)
            if not role:
                raise AirflowException(
                    f"The access_control mapping for DAG '{dag_id}' includes a role named "
                    f"'{rolename}', but that role does not exist"
                )
            action_names = set(action_names)
            invalid_action_names = action_names - self.DAG_ACTIONS
            if invalid_action_names:
                raise AirflowException(
                    f"The access_control map for DAG '{dag_resource_name}' includes "
                    f"the following invalid permissions: {invalid_action_names}; "
                    f"The set of valid permissions is: {self.DAG_ACTIONS}"
                )
            for action_name in action_names:
                dag_perm = _get_or_create_dag_permission(action_name)
                if dag_perm:
                    self.add_permission_to_role(role, dag_perm)
def create_perm_vm_for_all_dag(self) -> None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
    def check_authorization(
        self,
        perms: Sequence[tuple[str, str]] | None = None,
        dag_id: str | None = None,
    ) -> bool:
        """Checks that the logged in user has the specified permissions.

        DAG-level permission tuples are satisfied either by the global DAG
        resource or by access to at least one matching DAG (``dag_id``).
        """
        if not perms:
            return True
        for perm in perms:
            if perm in (
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
            ):
                can_access_all_dags = self.has_access(*perm)
                if can_access_all_dags:
                    continue
                # No blanket DAG permission: fall back to per-DAG access.
                action = perm[0]
                if self.can_access_some_dags(action, dag_id):
                    continue
                return False
            elif not self.has_access(*perm):
                return False
        return True
class FakeAppBuilder:
    """Minimal stand-in for a Flask App Builder.

    Its only job is to expose the ``appbuilder.get_session`` attribute that
    ``ApplessAirflowSecurityManager`` relies on, without the cost of creating
    a real Flask app.
    """

    def __init__(self, session: Session | None = None) -> None:
        # Stored as a plain attribute (not a method) to mirror FAB's interface.
        self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
    """Security manager usable without a full Flask application."""

    def __init__(self, session: Session | None = None):
        # Deliberately skip AirflowSecurityManager.__init__: no views are wired,
        # only the session-bearing appbuilder stub is needed.
        self.appbuilder = FakeAppBuilder(session)
|
normal
|
{
"blob_id": "47cee0c659976a2b74e2bb07f6c4d622ceab7362",
"index": 3866,
"step-1": "<mask token>\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n <mask token>\n <mask token>\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. 
Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n <mask token>\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n 
resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n <mask token>\n <mask token>\n\n def prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. 
Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. 
Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n <mask token>\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n <mask token>\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n <mask token>\n <mask token>\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate 
through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in 
self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-2": "<mask token>\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n <mask token>\n <mask token>\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. 
Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n <mask token>\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n 
resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n <mask token>\n\n def can_delete_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE,\n dag_resource_name, user=user)\n\n def prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. 
Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. 
Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) ->bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG, user)\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n <mask token>\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n <mask token>\n\n def _get_all_roles_with_permissions(self) ->dict[str, Role]:\n \"\"\"Returns a dict with a key 
of role name and value of role with early loaded permissions.\"\"\"\n return {r.name: r for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.\n permissions))).unique()}\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize 
default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n 
each value is a set() of action names (e.g. {'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in 
self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-3": "<mask token>\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, appbuilder) ->None:\n super().__init__(appbuilder=appbuilder, actionmodelview=self.\n actionmodelview, authdbview=self.authdbview, authldapview=self.\n authldapview, authoauthview=self.authoauthview, authoidview=\n self.authoidview, authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview, registeruser_view\n =self.registeruser_view, registeruserdbview=self.\n registeruserdbview, registeruseroauthview=self.\n registeruseroauthview, registerusermodelview=self.\n registerusermodelview, registeruseroidview=self.\n registeruseroidview, resetmypasswordview=self.\n resetmypasswordview, resetpasswordview=self.resetpasswordview,\n rolemodelview=self.rolemodelview, user_model=self.user_model,\n userinfoeditview=self.userinfoeditview, userdbmodelview=self.\n userdbmodelview, userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview, useroidmodelview=\n self.useroidmodelview, userremoteusermodelview=self.\n userremoteusermodelview, userstatschartview=self.userstatschartview\n )\n for attr in dir(self):\n if not attr.endswith('view'):\n continue\n view = getattr(self, attr, None)\n if not view or not 
getattr(view, 'datamodel', None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n <mask token>\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) ->None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n for config in roles:\n role_name = config['role']\n perms = config['perms']\n role = existing_roles.get(role_name) or self.add_role(role_name)\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)\n ) or self.create_permission(action_name, resource_name)\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. 
Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n <mask token>\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n 
resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n <mask token>\n\n def can_delete_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE,\n dag_resource_name, user=user)\n\n def prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. 
Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. 
Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) ->bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG, user)\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) ->None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(select(self.\n permission_model).filter_by(action=action, resource=\n resource).limit(1))\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n 
ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n\n def _get_all_non_dag_permissions(self) ->dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except those that are for specific DAGs.\n \"\"\"\n return {(action_name, resource_name): viewmodel for action_name,\n resource_name, viewmodel in self.appbuilder.get_session.execute\n (select(self.action_model.name, self.resource_model.name, self.\n permission_model).join(self.permission_model.action).join(self.\n permission_model.resource).where(~self.resource_model.name.like\n (f'{permissions.RESOURCE_DAG_PREFIX}%')))}\n\n def _get_all_roles_with_permissions(self) ->dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {r.name: r for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.\n permissions))).unique()}\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. 
See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in 
self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-4": "<mask token>\nEXISTING_ROLES = {'Admin', 'Viewer', 'User', 'Op', 'Public'}\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n SecurityManagerOverride: type = object\nelse:\n SecurityManagerOverride = get_auth_manager(\n ).get_security_manager_override_class()\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n \"\"\"Custom security manager, which introduces a permission model adapted to Airflow.\"\"\"\n VIEWER_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_AUDIT_LOG), (permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DAG_CODE), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_JOB), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD), (permissions.\n ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE), (permissions.\n ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_PLUGIN), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_XCOM), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG), (\n permissions.ACTION_CAN_ACCESS_MENU, 
permissions.\n RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_TASK_INSTANCE)]\n USER_PERMISSIONS = [(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_CREATE, permissions.\n RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_DELETE,\n permissions.RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_CREATE,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_DELETE,\n permissions.RESOURCE_DAG_RUN)]\n OP_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_CONFIG), (permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_ADMIN_MENU), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CONFIG), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CONNECTION), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_EDIT, 
permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM)]\n ADMIN_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_ROLE), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_ROLE)]\n DAG_RESOURCES = {permissions.RESOURCE_DAG}\n DAG_ACTIONS = permissions.DAG_ACTIONS\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n actionmodelview = ActionModelView\n permissionmodelview = PermissionPairModelView\n rolemodelview = CustomRoleModelView\n resourcemodelview = ResourceModelView\n userdbmodelview = CustomUserDBModelView\n resetmypasswordview = 
CustomResetMyPasswordView\n resetpasswordview = CustomResetPasswordView\n userinfoeditview = CustomUserInfoEditView\n userldapmodelview = CustomUserLDAPModelView\n useroauthmodelview = CustomUserOAuthModelView\n userremoteusermodelview = CustomUserRemoteUserModelView\n useroidmodelview = CustomUserOIDModelView\n userstatschartview = CustomUserStatsChartView\n\n def __init__(self, appbuilder) ->None:\n super().__init__(appbuilder=appbuilder, actionmodelview=self.\n actionmodelview, authdbview=self.authdbview, authldapview=self.\n authldapview, authoauthview=self.authoauthview, authoidview=\n self.authoidview, authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview, registeruser_view\n =self.registeruser_view, registeruserdbview=self.\n registeruserdbview, registeruseroauthview=self.\n registeruseroauthview, registerusermodelview=self.\n registerusermodelview, registeruseroidview=self.\n registeruseroidview, resetmypasswordview=self.\n resetmypasswordview, resetpasswordview=self.resetpasswordview,\n rolemodelview=self.rolemodelview, user_model=self.user_model,\n userinfoeditview=self.userinfoeditview, userdbmodelview=self.\n userdbmodelview, userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview, useroidmodelview=\n self.useroidmodelview, userremoteusermodelview=self.\n userremoteusermodelview, userstatschartview=self.userstatschartview\n )\n for attr in dir(self):\n if not attr.endswith('view'):\n continue\n view = getattr(self, attr, None)\n if not view or not getattr(view, 'datamodel', None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' 
in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n\n def init_role(self, role_name, perms) ->None:\n \"\"\"\n Initialize the role with actions and related resources.\n\n :param role_name:\n :param perms:\n :return:\n \"\"\"\n warnings.warn(\n '`init_role` has been deprecated. Please use `bulk_sync_roles` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n self.bulk_sync_roles([{'role': role_name, 'perms': perms}])\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) ->None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n for config in roles:\n role_name = config['role']\n perms = config['perms']\n role = existing_roles.get(role_name) or self.add_role(role_name)\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)\n ) or self.create_permission(action_name, resource_name)\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. 
Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n\n @provide_session\n def get_accessible_dags(self, user_actions: (Container[str] | None),\n user, session: Session=NEW_SESSION) ->Iterable[DagModel]:\n warnings.warn(\n '`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=3)\n dag_ids = self.get_accessible_dag_ids(user, user_actions, session)\n return session.scalars(select(DagModel).where(DagModel.dag_id.in_(\n dag_ids)))\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n 
can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n\n def can_edit_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG edit access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = 
permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT,\n dag_resource_name, user=user)\n\n def can_delete_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE,\n dag_resource_name, user=user)\n\n def prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if 
not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) ->bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG, user)\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) ->None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(select(self.\n 
permission_model).filter_by(action=action, resource=\n resource).limit(1))\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n\n def _get_all_non_dag_permissions(self) ->dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except those that are for specific DAGs.\n \"\"\"\n return {(action_name, resource_name): viewmodel for action_name,\n resource_name, viewmodel in self.appbuilder.get_session.execute\n (select(self.action_model.name, self.resource_model.name, self.\n permission_model).join(self.permission_model.action).join(self.\n permission_model.resource).where(~self.resource_model.name.like\n (f'{permissions.RESOURCE_DAG_PREFIX}%')))}\n\n def _get_all_roles_with_permissions(self) ->dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {r.name: r for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.\n permissions))).unique()}\n\n def 
create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in 
self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport warnings\nfrom typing import TYPE_CHECKING, Any, Collection, Container, Iterable, Sequence\n\nfrom flask import g\nfrom sqlalchemy import or_, select\nfrom sqlalchemy.orm import joinedload\n\nfrom airflow.auth.managers.fab.models import Permission, Resource, Role, User\nfrom airflow.auth.managers.fab.views.permissions import (\n ActionModelView,\n PermissionPairModelView,\n ResourceModelView,\n)\nfrom airflow.auth.managers.fab.views.roles_list import CustomRoleModelView\nfrom airflow.auth.managers.fab.views.user import (\n CustomUserDBModelView,\n CustomUserLDAPModelView,\n CustomUserOAuthModelView,\n CustomUserOIDModelView,\n CustomUserRemoteUserModelView,\n)\nfrom airflow.auth.managers.fab.views.user_edit import (\n CustomResetMyPasswordView,\n CustomResetPasswordView,\n CustomUserInfoEditView,\n)\nfrom airflow.auth.managers.fab.views.user_stats import CustomUserStatsChartView\nfrom airflow.exceptions import AirflowException, RemovedInAirflow3Warning\nfrom airflow.models import DagBag, DagModel\nfrom airflow.security import permissions\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.utils.session 
import NEW_SESSION, provide_session\nfrom airflow.www.extensions.init_auth_manager import get_auth_manager\nfrom airflow.www.fab_security.sqla.manager import SecurityManager\nfrom airflow.www.utils import CustomSQLAInterface\n\nEXISTING_ROLES = {\n \"Admin\",\n \"Viewer\",\n \"User\",\n \"Op\",\n \"Public\",\n}\n\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n\n SecurityManagerOverride: type = object\nelse:\n # Fetch the security manager override from the auth manager\n SecurityManagerOverride = get_auth_manager().get_security_manager_override_class()\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager, LoggingMixin):\n \"\"\"Custom security manager, which introduces a permission model adapted to Airflow.\"\"\"\n\n ###########################################################################\n # PERMISSIONS\n ###########################################################################\n\n # [START security_viewer_perms]\n VIEWER_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),\n 
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),\n ]\n # [END security_viewer_perms]\n\n # [START security_user_perms]\n USER_PERMISSIONS = [\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),\n ]\n # [END security_user_perms]\n\n # [START security_op_perms]\n OP_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, 
permissions.RESOURCE_CONFIG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),\n ]\n # [END security_op_perms]\n\n ADMIN_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),\n ]\n\n # global resource for dag-level access\n DAG_RESOURCES 
= {permissions.RESOURCE_DAG}\n DAG_ACTIONS = permissions.DAG_ACTIONS\n\n ###########################################################################\n # DEFAULT ROLE CONFIGURATIONS\n ###########################################################################\n\n ROLE_CONFIGS: list[dict[str, Any]] = [\n {\"role\": \"Public\", \"perms\": []},\n {\"role\": \"Viewer\", \"perms\": VIEWER_PERMISSIONS},\n {\n \"role\": \"User\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS,\n },\n {\n \"role\": \"Op\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,\n },\n {\n \"role\": \"Admin\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,\n },\n ]\n\n actionmodelview = ActionModelView\n permissionmodelview = PermissionPairModelView\n rolemodelview = CustomRoleModelView\n resourcemodelview = ResourceModelView\n userdbmodelview = CustomUserDBModelView\n resetmypasswordview = CustomResetMyPasswordView\n resetpasswordview = CustomResetPasswordView\n userinfoeditview = CustomUserInfoEditView\n userldapmodelview = CustomUserLDAPModelView\n useroauthmodelview = CustomUserOAuthModelView\n userremoteusermodelview = CustomUserRemoteUserModelView\n useroidmodelview = CustomUserOIDModelView\n userstatschartview = CustomUserStatsChartView\n\n def __init__(self, appbuilder) -> None:\n super().__init__(\n appbuilder=appbuilder,\n actionmodelview=self.actionmodelview,\n authdbview=self.authdbview,\n authldapview=self.authldapview,\n authoauthview=self.authoauthview,\n authoidview=self.authoidview,\n authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview,\n registeruser_view=self.registeruser_view,\n registeruserdbview=self.registeruserdbview,\n registeruseroauthview=self.registeruseroauthview,\n registerusermodelview=self.registerusermodelview,\n registeruseroidview=self.registeruseroidview,\n resetmypasswordview=self.resetmypasswordview,\n resetpasswordview=self.resetpasswordview,\n 
rolemodelview=self.rolemodelview,\n user_model=self.user_model,\n userinfoeditview=self.userinfoeditview,\n userdbmodelview=self.userdbmodelview,\n userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview,\n useroidmodelview=self.useroidmodelview,\n userremoteusermodelview=self.userremoteusermodelview,\n userstatschartview=self.userstatschartview,\n )\n\n # Go and fix up the SQLAInterface used from the stock one to our subclass.\n # This is needed to support the \"hack\" where we had to edit\n # FieldConverter.conversion_table in place in airflow.www.utils\n for attr in dir(self):\n if not attr.endswith(\"view\"):\n continue\n view = getattr(self, attr, None)\n if not view or not getattr(view, \"datamodel\", None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) -> str:\n if \".\" in dag_id:\n dm = self.appbuilder.get_session.execute(\n select(DagModel.dag_id, DagModel.root_dag_id).where(DagModel.dag_id == dag_id)\n ).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n\n def init_role(self, role_name, perms) -> None:\n \"\"\"\n Initialize the role with actions and related resources.\n\n :param role_name:\n :param perms:\n :return:\n \"\"\"\n warnings.warn(\n \"`init_role` has been deprecated. 
Please use `bulk_sync_roles` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n self.bulk_sync_roles([{\"role\": role_name, \"perms\": perms}])\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n\n for config in roles:\n role_name = config[\"role\"]\n perms = config[\"perms\"]\n role = existing_roles.get(role_name) or self.add_role(role_name)\n\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(\n action_name, resource_name\n )\n\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) -> Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n \"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)\n\n def get_editable_dags(self, user) -> Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n \"`get_editable_dags` has been deprecated. 
Please use `get_editable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)\n\n @provide_session\n def get_accessible_dags(\n self,\n user_actions: Container[str] | None,\n user,\n session: Session = NEW_SESSION,\n ) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=3,\n )\n dag_ids = self.get_accessible_dag_ids(user, user_actions, session)\n return session.scalars(select(DagModel).where(DagModel.dag_id.in_(dag_ids)))\n\n def get_readable_dag_ids(self, user) -> set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) -> set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(\n self,\n user,\n user_actions: Container[str] | None = None,\n session: Session = NEW_SESSION,\n ) -> set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]\n\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (\n permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)\n ):\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n user_query = session.scalar(\n select(User)\n .options(\n joinedload(User.roles)\n .subqueryload(Role.permissions)\n .options(joinedload(Permission.action), joinedload(Permission.resource))\n )\n .where(User.id == 
user.id)\n )\n roles = user_query.roles\n\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])\n else:\n resources.add(resource)\n return {\n dag.dag_id\n for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))\n }\n\n def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != \"~\":\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))\n\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)\n\n def can_edit_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG edit access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)\n\n def can_delete_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return 
self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)\n\n def prefixed_dag_id(self, dag_id: str) -> str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n \"`prefixed_dag_id` has been deprecated. \"\n \"Please use `airflow.security.permissions.resource_name_for_dag` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) -> bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n\n return False\n\n def _has_role(self, role_name_or_list: Container, user) -> bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) -> bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. 
Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return (\n self._has_role([\"Admin\", \"Viewer\", \"Op\", \"User\"], user)\n or self.can_read_all_dags(user)\n or self.can_edit_all_dags(user)\n )\n\n def can_edit_all_dags(self, user=None) -> bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) -> bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)\n\n def clean_perms(self) -> None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug(\"Cleaning faulty perms\")\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(\n or_(\n Permission.action == None, # noqa\n Permission.resource == None, # noqa\n )\n )\n # Since FAB doesn't define ON DELETE CASCADE on these tables, we need\n # to delete the _object_ so that SQLA knows to delete the many-to-many\n # relationship object too. 
:(\n\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info(\"Deleted %s faulty permissions\", deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) -> None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(\n select(self.permission_model).filter_by(action=action, resource=resource).limit(1)\n )\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) -> None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) -> set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n )\n )\n\n def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except 
those that are for specific DAGs.\n \"\"\"\n return {\n (action_name, resource_name): viewmodel\n for action_name, resource_name, viewmodel in (\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name, self.permission_model)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n .where(~self.resource_model.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n )\n }\n\n def _get_all_roles_with_permissions(self) -> dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {\n r.name: r\n for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.permissions))\n ).unique()\n }\n\n def create_dag_specific_permissions(self) -> None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. 
See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) -> None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()\n\n def sync_roles(self) -> None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\", dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\",\n dag_resource_name,\n )\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param 
access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. {'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\", action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name != \"Admin\"]\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\n \"Revoking '%s' on DAG '%s' for role '%s'\",\n perm.action,\n dag_resource_name,\n role.name,\n )\n self.remove_permission_from_role(role, perm)\n\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named \"\n f\"'{rolename}', but that role does not exist\"\n )\n\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes \"\n f\"the following invalid permissions: {invalid_action_names}; \"\n f\"The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) -> None:\n \"\"\"Create perm-vm if 
not exist and insert into FAB security model for all-dags.\"\"\"\n # create perm for global logical dag\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(\n self,\n perms: Sequence[tuple[str, str]] | None = None,\n dag_id: str | None = None,\n ) -> bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n\n for perm in perms:\n if perm in (\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n ):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n\n elif not self.has_access(*perm):\n return False\n\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: Session | None = None) -> None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: Session | None = None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-ids": [
33,
36,
40,
47,
49
]
}
|
[
33,
36,
40,
47,
49
] |
#
# Copyright (C) 2020 RFI
#
# Author: James Parkhurst
#
# This code is distributed under the GPLv3 license, a copy of
# which is included in the root directory of this package.
#
import logging
import numpy
from maptools.util import read, write
# Get the logger
logger = logging.getLogger(__name__)
def array_rebin(data, shape):
    """
    Rebin a multidimensional array by summing over blocks.

    Each output element is the sum of a contiguous block of input
    elements, so the total of the array is preserved.

    Args:
        data (array): The input array
        shape (tuple): The new shape; each dimension must evenly
            divide the corresponding input dimension

    """
    # The number of output dimensions must match the input
    assert data.ndim == len(shape)
    assert data.shape[0] % shape[0] == 0
    assert data.shape[1] % shape[1] == 0
    assert data.shape[2] % shape[2] == 0

    # Interleave each output size with its bin factor, e.g. a
    # (4, 4, 4) -> (2, 2, 2) rebin reshapes to (2, 2, 2, 2, 2, 2)
    paired_dims = []
    for new_size, old_size in zip(shape, data.shape):
        paired_dims.extend((new_size, old_size // new_size))
    binned = data.reshape(paired_dims)

    # Collapse the bin-factor axes one at a time from the end; after
    # each sum the next factor axis is again at a fixed negative offset
    for axis_offset in range(1, len(shape) + 1):
        binned = binned.sum(-axis_offset)
    return binned
def mapfile_rebin(input_filename, output_filename, shape=None):
    """
    Rebin the map

    Args:
        input_filename (str): The input map filename
        output_filename (str): The output map filename
        shape (tuple): The new shape of the map

    """
    # Open the input file
    infile = read(input_filename)

    # Get the data
    data = infile.data

    # Remember the original shape before rebinning; it is needed to
    # compute the binning factor for the voxel size update below.
    original_shape = data.shape

    # Rebin the data to the requested shape
    logger.info("Resampling map from shape %s to %s" % (original_shape, tuple(shape)))
    data = array_rebin(data, shape)

    # Write the output file
    outfile = write(output_filename, data, infile=infile)

    # Update the voxel size by the binning factor along each axis.
    # Bug fix: this previously used data.shape[i] // shape[i] AFTER the
    # rebin, where data.shape == shape, so the factor was always 1 and
    # the voxel size was silently left unchanged.
    outfile.voxel_size = (
        outfile.voxel_size["z"] * original_shape[0] // shape[0],
        outfile.voxel_size["y"] * original_shape[1] // shape[1],
        outfile.voxel_size["x"] * original_shape[2] // shape[2],
    )
def rebin(*args, **kwargs):
    """
    Rebin the map, dispatching on the type of the first argument.

    If the first positional argument is a filename (str), or an
    ``input_filename`` keyword is given, delegate to ``mapfile_rebin``;
    otherwise treat the input as an array and delegate to ``array_rebin``.

    """
    # Bug fix: the original tested `type(args[0]) == "str"`, comparing a
    # type object against a string literal, which is always False — so a
    # positional filename was never dispatched to mapfile_rebin. Use
    # isinstance instead, with explicit parentheses for the dispatch rule.
    if (len(args) > 0 and isinstance(args[0], str)) or "input_filename" in kwargs:
        func = mapfile_rebin
    else:
        func = array_rebin
    return func(*args, **kwargs)
|
normal
|
{
"blob_id": "18dc01f3e1672407800e53d80a85ffc8d5b86c17",
"index": 7497,
"step-1": "<mask token>\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-2": "<mask token>\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n infile = read(input_filename)\n data = infile.data\n logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(\n shape)))\n data = array_rebin(data, shape)\n outfile = write(output_filename, data, infile=infile)\n outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0\n ], outfile.voxel_size['y'] * data.shape[1] // shape[1\n ], outfile.voxel_size['x'] * data.shape[2] // shape[2]\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n infile = read(input_filename)\n data = infile.data\n logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(\n shape)))\n data = array_rebin(data, shape)\n outfile = write(output_filename, data, infile=infile)\n outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0\n ], outfile.voxel_size['y'] * data.shape[1] // shape[1\n ], outfile.voxel_size['x'] * data.shape[2] // shape[2]\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-4": "import logging\nimport numpy\nfrom maptools.util import read, write\nlogger = logging.getLogger(__name__)\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n infile = read(input_filename)\n data = infile.data\n logger.info('Resampling map from shape %s to %s' % (data.shape, tuple(\n shape)))\n data = array_rebin(data, shape)\n outfile = write(output_filename, data, infile=infile)\n outfile.voxel_size = outfile.voxel_size['z'] * data.shape[0] // shape[0\n ], outfile.voxel_size['y'] * data.shape[1] // shape[1\n ], outfile.voxel_size['x'] * data.shape[2] // shape[2]\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == 'str' or 'input_filename' in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-5": "#\n# Copyright (C) 2020 RFI\n#\n# Author: James Parkhurst\n#\n# This code is distributed under the GPLv3 license, a copy of\n# which is included in the root directory of this package.\n#\nimport logging\nimport numpy\nfrom maptools.util import read, write\n\n\n# Get the logger\nlogger = logging.getLogger(__name__)\n\n\ndef array_rebin(data, shape):\n \"\"\"\n Rebin a multidimensional array\n\n Args:\n data (array): The input array\n shape (tuple): The new shape\n\n \"\"\"\n\n # Ensure dimensions are consistent\n assert data.ndim == len(shape)\n assert data.shape[0] % shape[0] == 0\n assert data.shape[1] % shape[1] == 0\n assert data.shape[2] % shape[2] == 0\n\n # Get pairs of (shape, bin factor) for each dimension\n factors = numpy.array([(d, c // d) for d, c in zip(shape, data.shape)])\n\n # Rebin the array\n data = data.reshape(factors.flatten())\n for i in range(len(shape)):\n data = data.sum(-1 * (i + 1))\n return data\n\n\ndef mapfile_rebin(input_filename, output_filename, shape=None):\n \"\"\"\n Rebin the map\n\n Args:\n input_filename (str): The input map filename\n output_filename (str): The output map filename\n shape (tuple): The new shape of the map\n\n \"\"\"\n\n # Open the input file\n infile = read(input_filename)\n\n # Get the data\n data = infile.data\n\n # Get the subset of data\n logger.info(\"Resampling map from shape %s to %s\" % (data.shape, tuple(shape)))\n data = array_rebin(data, shape)\n\n # Write the output file\n outfile = write(output_filename, data, infile=infile)\n\n # Update the voxel size\n outfile.voxel_size = (\n outfile.voxel_size[\"z\"] * data.shape[0] // shape[0],\n outfile.voxel_size[\"y\"] * data.shape[1] // shape[1],\n outfile.voxel_size[\"x\"] * data.shape[2] // shape[2],\n )\n\n\ndef rebin(*args, **kwargs):\n \"\"\"\n Rebin the map\n\n \"\"\"\n if len(args) > 0 and type(args[0]) == \"str\" or \"input_filename\" in kwargs:\n func = mapfile_rebin\n else:\n func = array_rebin\n return func(*args, **kwargs)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def _MinutesToMicroseconds(minutes):
return minutes * 60 * 1000000
<|reserved_special_token_0|>
class _PickMaxRecord(beam.DoFn):
def process(self, data):
_, streams = data
time_dicts = []
info_dicts = []
metrics_dicts = []
abstract_metrics_dicts = []
for d in streams:
time_dicts.append(d['time'])
info_dicts.append(d['info'])
metrics_dicts.append(d['metrics'])
abstract_metrics_dicts.append(d['abstract_metrics'])
vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],
'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in
metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},
'abstract_metrics': {k: max([d[k] for d in
abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}
return [vm_sample]
<|reserved_special_token_0|>
def AlignByTime(data):
keyed_data = data | 'Flooring time' >> beam.Map(
_AssignUniqueIDAndFlooredTimeAsKey)
five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()
max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>
beam.ParDo(_PickMaxRecord()))
simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>
beam.Map(_VMSampleToSimulatedSample))
return simulated_sample
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _MinutesToMicroseconds(minutes):
return minutes * 60 * 1000000
<|reserved_special_token_0|>
class _PickMaxRecord(beam.DoFn):
def process(self, data):
_, streams = data
time_dicts = []
info_dicts = []
metrics_dicts = []
abstract_metrics_dicts = []
for d in streams:
time_dicts.append(d['time'])
info_dicts.append(d['info'])
metrics_dicts.append(d['metrics'])
abstract_metrics_dicts.append(d['abstract_metrics'])
vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],
'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in
metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},
'abstract_metrics': {k: max([d[k] for d in
abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}
return [vm_sample]
def _VMSampleToSimulatedSample(vm_sample):
simulated_sample = {'simulated_time': _MinutesToMicroseconds(5) * int(
np.floor(vm_sample['time'] / _MinutesToMicroseconds(5))),
'simulated_machine': str(vm_sample['info']['machine_id']), 'sample':
vm_sample}
return simulated_sample
def AlignByTime(data):
keyed_data = data | 'Flooring time' >> beam.Map(
_AssignUniqueIDAndFlooredTimeAsKey)
five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()
max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>
beam.ParDo(_PickMaxRecord()))
simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>
beam.Map(_VMSampleToSimulatedSample))
return simulated_sample
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _MinutesToMicroseconds(minutes):
return minutes * 60 * 1000000
def _AssignUniqueIDAndFlooredTimeAsKey(row):
return str(row['info']['unique_id']) + '-' + str(_MinutesToMicroseconds
(int(np.floor(row['time'] / _MinutesToMicroseconds(5))))), row
class _PickMaxRecord(beam.DoFn):
def process(self, data):
_, streams = data
time_dicts = []
info_dicts = []
metrics_dicts = []
abstract_metrics_dicts = []
for d in streams:
time_dicts.append(d['time'])
info_dicts.append(d['info'])
metrics_dicts.append(d['metrics'])
abstract_metrics_dicts.append(d['abstract_metrics'])
vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],
'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in
metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},
'abstract_metrics': {k: max([d[k] for d in
abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}
return [vm_sample]
def _VMSampleToSimulatedSample(vm_sample):
simulated_sample = {'simulated_time': _MinutesToMicroseconds(5) * int(
np.floor(vm_sample['time'] / _MinutesToMicroseconds(5))),
'simulated_machine': str(vm_sample['info']['machine_id']), 'sample':
vm_sample}
return simulated_sample
def AlignByTime(data):
keyed_data = data | 'Flooring time' >> beam.Map(
_AssignUniqueIDAndFlooredTimeAsKey)
five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()
max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>
beam.ParDo(_PickMaxRecord()))
simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>
beam.Map(_VMSampleToSimulatedSample))
return simulated_sample
<|reserved_special_token_1|>
import apache_beam as beam
import numpy as np
def _MinutesToMicroseconds(minutes):
return minutes * 60 * 1000000
def _AssignUniqueIDAndFlooredTimeAsKey(row):
return str(row['info']['unique_id']) + '-' + str(_MinutesToMicroseconds
(int(np.floor(row['time'] / _MinutesToMicroseconds(5))))), row
class _PickMaxRecord(beam.DoFn):
def process(self, data):
_, streams = data
time_dicts = []
info_dicts = []
metrics_dicts = []
abstract_metrics_dicts = []
for d in streams:
time_dicts.append(d['time'])
info_dicts.append(d['info'])
metrics_dicts.append(d['metrics'])
abstract_metrics_dicts.append(d['abstract_metrics'])
vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],
'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in
metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},
'abstract_metrics': {k: max([d[k] for d in
abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}
return [vm_sample]
def _VMSampleToSimulatedSample(vm_sample):
simulated_sample = {'simulated_time': _MinutesToMicroseconds(5) * int(
np.floor(vm_sample['time'] / _MinutesToMicroseconds(5))),
'simulated_machine': str(vm_sample['info']['machine_id']), 'sample':
vm_sample}
return simulated_sample
def AlignByTime(data):
keyed_data = data | 'Flooring time' >> beam.Map(
_AssignUniqueIDAndFlooredTimeAsKey)
five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()
max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>
beam.ParDo(_PickMaxRecord()))
simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>
beam.Map(_VMSampleToSimulatedSample))
return simulated_sample
<|reserved_special_token_1|>
# Copyright 2020 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
import numpy as np
def _MinutesToMicroseconds(minutes):
return minutes * 60 * 1_000_000
def _AssignUniqueIDAndFlooredTimeAsKey(row):
return (
str(row["info"]["unique_id"])
+ "-"
+ str(
_MinutesToMicroseconds(
int(np.floor(row["time"] / _MinutesToMicroseconds(5)))
)
),
row,
)
class _PickMaxRecord(beam.DoFn):
    """Beam DoFn that collapses a group of samples into one representative.

    Input element: a ``(key, iterable_of_sample_dicts)`` pair as produced by
    ``GroupByKey``.  Output: a one-element list holding a dict that keeps the
    first sample's ``time`` and ``info`` and takes the per-key maximum over
    the group for ``metrics`` and ``abstract_metrics``.
    """

    def process(self, data):
        # The grouping key itself is unused; only the grouped dicts matter.
        _, streams = data
        time_dicts = []
        info_dicts = []
        metrics_dicts = []
        abstract_metrics_dicts = []
        for d in streams:
            time_dicts.append(d["time"])
            info_dicts.append(d["info"])
            metrics_dicts.append(d["metrics"])
            abstract_metrics_dicts.append(d["abstract_metrics"])
        vm_sample = {
            # Representative time/info come from the first sample in the
            # group (presumably all members describe the same VM in the same
            # bucket — TODO confirm against the upstream keying).
            "time": time_dicts[0],
            "info": info_dicts[0],
            # Per-metric max across the group; nan_to_num maps NaN to 0.0
            # first, so a NaN entry can never be selected as the maximum.
            "metrics": {
                k: np.nanmax(
                    np.nan_to_num(
                        np.array([d[k] for d in metrics_dicts], dtype=np.float64)
                    )
                )
                for k in metrics_dicts[0]
            },
            # Plain Python max for abstract metrics (no NaN handling here).
            "abstract_metrics": {
                k: max([d[k] for d in abstract_metrics_dicts])
                for k in abstract_metrics_dicts[0]
            },
        }
        return [vm_sample]
def _VMSampleToSimulatedSample(vm_sample):
    """Wrap a picked VM sample into the simulated-sample record shape.

    ``simulated_time`` is the start of the sample's 5-minute bucket (in
    microseconds); the machine id is carried over as a string.
    """
    five_minutes = _MinutesToMicroseconds(5)
    bucket_index = int(np.floor(vm_sample["time"] / five_minutes))
    return {
        "simulated_time": five_minutes * bucket_index,
        "simulated_machine": str(vm_sample["info"]["machine_id"]),
        "sample": vm_sample,
    }
def AlignByTime(data):
    """Build the pipeline stage that aligns VM samples to 5-minute buckets.

    Steps: key each sample by (unique_id, floored 5-minute bucket), group by
    that key, keep the per-bucket maximum record, then reshape each survivor
    into a simulated-sample dict.

    Args:
        data: a PCollection of sample dicts with "time", "info", "metrics"
            and "abstract_metrics" entries.

    Returns:
        A PCollection of simulated-sample dicts.
    """
    keyed_data = data | "Flooring time" >> beam.Map(_AssignUniqueIDAndFlooredTimeAsKey)
    five_minute_groups = keyed_data | "Group Data by Keys" >> beam.GroupByKey()
    max_record = five_minute_groups | "Pick Max Record in 5 Minutes" >> beam.ParDo(
        _PickMaxRecord()
    )
    # NOTE(review): "SimulatedSammple" looks like a typo, but stage labels are
    # part of the pipeline's stable naming, so it is left untouched.
    simulated_sample = max_record | "Change VMSample to SimulatedSammple" >> beam.Map(
        _VMSampleToSimulatedSample
    )
    return simulated_sample
|
flexible
|
{
"blob_id": "d66945add0726c85b8ac29056269ed55c6eb9369",
"index": 3442,
"step-1": "<mask token>\n\n\ndef _MinutesToMicroseconds(minutes):\n return minutes * 60 * 1000000\n\n\n<mask token>\n\n\nclass _PickMaxRecord(beam.DoFn):\n\n def process(self, data):\n _, streams = data\n time_dicts = []\n info_dicts = []\n metrics_dicts = []\n abstract_metrics_dicts = []\n for d in streams:\n time_dicts.append(d['time'])\n info_dicts.append(d['info'])\n metrics_dicts.append(d['metrics'])\n abstract_metrics_dicts.append(d['abstract_metrics'])\n vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],\n 'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in\n metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},\n 'abstract_metrics': {k: max([d[k] for d in\n abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}\n return [vm_sample]\n\n\n<mask token>\n\n\ndef AlignByTime(data):\n keyed_data = data | 'Flooring time' >> beam.Map(\n _AssignUniqueIDAndFlooredTimeAsKey)\n five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()\n max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>\n beam.ParDo(_PickMaxRecord()))\n simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>\n beam.Map(_VMSampleToSimulatedSample))\n return simulated_sample\n",
"step-2": "<mask token>\n\n\ndef _MinutesToMicroseconds(minutes):\n return minutes * 60 * 1000000\n\n\n<mask token>\n\n\nclass _PickMaxRecord(beam.DoFn):\n\n def process(self, data):\n _, streams = data\n time_dicts = []\n info_dicts = []\n metrics_dicts = []\n abstract_metrics_dicts = []\n for d in streams:\n time_dicts.append(d['time'])\n info_dicts.append(d['info'])\n metrics_dicts.append(d['metrics'])\n abstract_metrics_dicts.append(d['abstract_metrics'])\n vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],\n 'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in\n metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},\n 'abstract_metrics': {k: max([d[k] for d in\n abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}\n return [vm_sample]\n\n\ndef _VMSampleToSimulatedSample(vm_sample):\n simulated_sample = {'simulated_time': _MinutesToMicroseconds(5) * int(\n np.floor(vm_sample['time'] / _MinutesToMicroseconds(5))),\n 'simulated_machine': str(vm_sample['info']['machine_id']), 'sample':\n vm_sample}\n return simulated_sample\n\n\ndef AlignByTime(data):\n keyed_data = data | 'Flooring time' >> beam.Map(\n _AssignUniqueIDAndFlooredTimeAsKey)\n five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()\n max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>\n beam.ParDo(_PickMaxRecord()))\n simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>\n beam.Map(_VMSampleToSimulatedSample))\n return simulated_sample\n",
"step-3": "<mask token>\n\n\ndef _MinutesToMicroseconds(minutes):\n return minutes * 60 * 1000000\n\n\ndef _AssignUniqueIDAndFlooredTimeAsKey(row):\n return str(row['info']['unique_id']) + '-' + str(_MinutesToMicroseconds\n (int(np.floor(row['time'] / _MinutesToMicroseconds(5))))), row\n\n\nclass _PickMaxRecord(beam.DoFn):\n\n def process(self, data):\n _, streams = data\n time_dicts = []\n info_dicts = []\n metrics_dicts = []\n abstract_metrics_dicts = []\n for d in streams:\n time_dicts.append(d['time'])\n info_dicts.append(d['info'])\n metrics_dicts.append(d['metrics'])\n abstract_metrics_dicts.append(d['abstract_metrics'])\n vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],\n 'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in\n metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},\n 'abstract_metrics': {k: max([d[k] for d in\n abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}\n return [vm_sample]\n\n\ndef _VMSampleToSimulatedSample(vm_sample):\n simulated_sample = {'simulated_time': _MinutesToMicroseconds(5) * int(\n np.floor(vm_sample['time'] / _MinutesToMicroseconds(5))),\n 'simulated_machine': str(vm_sample['info']['machine_id']), 'sample':\n vm_sample}\n return simulated_sample\n\n\ndef AlignByTime(data):\n keyed_data = data | 'Flooring time' >> beam.Map(\n _AssignUniqueIDAndFlooredTimeAsKey)\n five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()\n max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>\n beam.ParDo(_PickMaxRecord()))\n simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>\n beam.Map(_VMSampleToSimulatedSample))\n return simulated_sample\n",
"step-4": "import apache_beam as beam\nimport numpy as np\n\n\ndef _MinutesToMicroseconds(minutes):\n return minutes * 60 * 1000000\n\n\ndef _AssignUniqueIDAndFlooredTimeAsKey(row):\n return str(row['info']['unique_id']) + '-' + str(_MinutesToMicroseconds\n (int(np.floor(row['time'] / _MinutesToMicroseconds(5))))), row\n\n\nclass _PickMaxRecord(beam.DoFn):\n\n def process(self, data):\n _, streams = data\n time_dicts = []\n info_dicts = []\n metrics_dicts = []\n abstract_metrics_dicts = []\n for d in streams:\n time_dicts.append(d['time'])\n info_dicts.append(d['info'])\n metrics_dicts.append(d['metrics'])\n abstract_metrics_dicts.append(d['abstract_metrics'])\n vm_sample = {'time': time_dicts[0], 'info': info_dicts[0],\n 'metrics': {k: np.nanmax(np.nan_to_num(np.array([d[k] for d in\n metrics_dicts], dtype=np.float64))) for k in metrics_dicts[0]},\n 'abstract_metrics': {k: max([d[k] for d in\n abstract_metrics_dicts]) for k in abstract_metrics_dicts[0]}}\n return [vm_sample]\n\n\ndef _VMSampleToSimulatedSample(vm_sample):\n simulated_sample = {'simulated_time': _MinutesToMicroseconds(5) * int(\n np.floor(vm_sample['time'] / _MinutesToMicroseconds(5))),\n 'simulated_machine': str(vm_sample['info']['machine_id']), 'sample':\n vm_sample}\n return simulated_sample\n\n\ndef AlignByTime(data):\n keyed_data = data | 'Flooring time' >> beam.Map(\n _AssignUniqueIDAndFlooredTimeAsKey)\n five_minute_groups = keyed_data | 'Group Data by Keys' >> beam.GroupByKey()\n max_record = (five_minute_groups | 'Pick Max Record in 5 Minutes' >>\n beam.ParDo(_PickMaxRecord()))\n simulated_sample = (max_record | 'Change VMSample to SimulatedSammple' >>\n beam.Map(_VMSampleToSimulatedSample))\n return simulated_sample\n",
"step-5": "# Copyright 2020 Google LLC.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport apache_beam as beam\nimport numpy as np\n\n\ndef _MinutesToMicroseconds(minutes):\n return minutes * 60 * 1_000_000\n\n\ndef _AssignUniqueIDAndFlooredTimeAsKey(row):\n return (\n str(row[\"info\"][\"unique_id\"])\n + \"-\"\n + str(\n _MinutesToMicroseconds(\n int(np.floor(row[\"time\"] / _MinutesToMicroseconds(5)))\n )\n ),\n row,\n )\n\n\nclass _PickMaxRecord(beam.DoFn):\n def process(self, data):\n _, streams = data\n\n time_dicts = []\n info_dicts = []\n metrics_dicts = []\n abstract_metrics_dicts = []\n for d in streams:\n time_dicts.append(d[\"time\"])\n info_dicts.append(d[\"info\"])\n metrics_dicts.append(d[\"metrics\"])\n abstract_metrics_dicts.append(d[\"abstract_metrics\"])\n vm_sample = {\n \"time\": time_dicts[0],\n \"info\": info_dicts[0],\n \"metrics\": {\n k: np.nanmax(\n np.nan_to_num(\n np.array([d[k] for d in metrics_dicts], dtype=np.float64)\n )\n )\n for k in metrics_dicts[0]\n },\n \"abstract_metrics\": {\n k: max([d[k] for d in abstract_metrics_dicts])\n for k in abstract_metrics_dicts[0]\n },\n }\n return [vm_sample]\n\n\ndef _VMSampleToSimulatedSample(vm_sample):\n simulated_sample = {\n \"simulated_time\": _MinutesToMicroseconds(5)\n * int(np.floor(vm_sample[\"time\"] / _MinutesToMicroseconds(5))),\n \"simulated_machine\": str(vm_sample[\"info\"][\"machine_id\"]),\n \"sample\": vm_sample,\n }\n return simulated_sample\n\n\ndef AlignByTime(data):\n 
keyed_data = data | \"Flooring time\" >> beam.Map(_AssignUniqueIDAndFlooredTimeAsKey)\n five_minute_groups = keyed_data | \"Group Data by Keys\" >> beam.GroupByKey()\n max_record = five_minute_groups | \"Pick Max Record in 5 Minutes\" >> beam.ParDo(\n _PickMaxRecord()\n )\n simulated_sample = max_record | \"Change VMSample to SimulatedSammple\" >> beam.Map(\n _VMSampleToSimulatedSample\n )\n return simulated_sample\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def initialize_worker():
worker = PrStatusWorker()
worker.start_pr_status_polling()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def initialize_worker():
worker = PrStatusWorker()
worker.start_pr_status_polling()
print('Starting the PR status monitor worker thread...')
<|reserved_special_token_0|>
worker_thread.start()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def initialize_worker():
worker = PrStatusWorker()
worker.start_pr_status_polling()
print('Starting the PR status monitor worker thread...')
worker_thread = threading.Thread(target=initialize_worker, name=
'pr_status_worker')
worker_thread.start()
<|reserved_special_token_1|>
from PrStatusWorker import PrStatusWorker
import threading
def initialize_worker():
worker = PrStatusWorker()
worker.start_pr_status_polling()
print('Starting the PR status monitor worker thread...')
worker_thread = threading.Thread(target=initialize_worker, name=
'pr_status_worker')
worker_thread.start()
<|reserved_special_token_1|>
from PrStatusWorker import PrStatusWorker
import threading
def initialize_worker():
    """Thread target: create a PrStatusWorker and start its PR-status polling."""
    worker = PrStatusWorker()
    worker.start_pr_status_polling()

print("Starting the PR status monitor worker thread...")
# Non-daemon thread started at import time: the process stays alive until the
# polling loop exits.  NOTE(review): no join/shutdown handling here — confirm
# that is intentional.
worker_thread = threading.Thread(target=initialize_worker, name="pr_status_worker")
worker_thread.start()
|
flexible
|
{
"blob_id": "4b5f58d471b05428caef3ca7a3bdc0d30a7e3881",
"index": 5265,
"step-1": "<mask token>\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\nprint('Starting the PR status monitor worker thread...')\n<mask token>\nworker_thread.start()\n",
"step-3": "<mask token>\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\nprint('Starting the PR status monitor worker thread...')\nworker_thread = threading.Thread(target=initialize_worker, name=\n 'pr_status_worker')\nworker_thread.start()\n",
"step-4": "from PrStatusWorker import PrStatusWorker\nimport threading\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\nprint('Starting the PR status monitor worker thread...')\nworker_thread = threading.Thread(target=initialize_worker, name=\n 'pr_status_worker')\nworker_thread.start()\n",
"step-5": "\nfrom PrStatusWorker import PrStatusWorker\nimport threading\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\nprint(\"Starting the PR status monitor worker thread...\")\nworker_thread = threading.Thread(target=initialize_worker, name=\"pr_status_worker\")\nworker_thread.start()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Start the HTML and Javascript code
# NOTE: Python 2 syntax (print statement) and the legacy Google Charts
# "jsapi" loader; this script emits a complete HTML page to stdout.
print '''
<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["treemap"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
'''
# CountBugs is expected to print the treemap data rows; it is defined
# elsewhere (not imported here) — TODO confirm where it comes from.
print CountBugs('path/to/repo')
# Finish the HTML and Javascript
print '''
]);
// Create and draw the visualization.
var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));
tree.draw(data, {
maxDepth: 2,
minColor: 'YellowGreen',
midColor: 'LightGoldenRodYellow',
maxColor: 'Red',
headerHeight: 15,
fontColor: 'black',
showScale: true});
}
</script>
</head>
<body>
<div id="chart_div" style="width: 900px; height: 500px;"></div>
</body>
</html>
'''
|
normal
|
{
"blob_id": "0e112ecfd4ccf762234dff564dd6f3987418dedd",
"index": 1033,
"step-1": "# Start the HTML and Javascript code\nprint '''\n<html>\n <head>\n <script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>\n <script type=\"text/javascript\">\n google.load(\"visualization\", \"1\", {packages:[\"treemap\"]});\n google.setOnLoadCallback(drawChart);\n function drawChart() {\n'''\n\nprint CountBugs('path/to/repo')\n\n# Finish the HTML and Javascript\nprint '''\n ]);\n\n // Create and draw the visualization.\n var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));\n tree.draw(data, {\n maxDepth: 2,\n minColor: 'YellowGreen',\n midColor: 'LightGoldenRodYellow',\n maxColor: 'Red',\n headerHeight: 15,\n fontColor: 'black',\n showScale: true});\n }\n </script>\n </head>\n\n <body>\n <div id=\"chart_div\" style=\"width: 900px; height: 500px;\"></div>\n </body>\n</html>\n'''\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Minimal script: print a greeting to stdout.
print('Hi, I am Nag')
|
normal
|
{
"blob_id": "0ca751e050244fd85c8110d02d5e7a79eb449ada",
"index": 8542,
"step-1": "<mask token>\n",
"step-2": "print('Hi, I am Nag')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def abbreviation(a, b):
m, n = len(a), len(b)
dp = [([False] * (m + 1)) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n + 1):
for j in range(1, m + 1):
if a[j - 1] == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1]
elif a[j - 1].upper() == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]
elif a[j - 1].islower():
dp[i][j] = dp[i][j - 1]
return 'YES' if dp[n][m] else 'NO'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def abbreviation(a, b):
m, n = len(a), len(b)
dp = [([False] * (m + 1)) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n + 1):
for j in range(1, m + 1):
if a[j - 1] == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1]
elif a[j - 1].upper() == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]
elif a[j - 1].islower():
dp[i][j] = dp[i][j - 1]
return 'YES' if dp[n][m] else 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
a = input()
b = input()
result = abbreviation(a, b)
fptr.write(result + '\n')
fptr.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import math
import os
import random
import re
import sys
def abbreviation(a, b):
m, n = len(a), len(b)
dp = [([False] * (m + 1)) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n + 1):
for j in range(1, m + 1):
if a[j - 1] == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1]
elif a[j - 1].upper() == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]
elif a[j - 1].islower():
dp[i][j] = dp[i][j - 1]
return 'YES' if dp[n][m] else 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
a = input()
b = input()
result = abbreviation(a, b)
fptr.write(result + '\n')
fptr.close()
<|reserved_special_token_1|>
"""
You can perform the following operations on the string, :
Capitalize zero or more of a's lowercase letters.
Delete all of the remaining lowercase letters in a.
Given two strings, a and b, determine if it's possible to make a equal to b as described. If so, print YES on a new line. Otherwise, print NO.
For example, given a = daBcd and b = ABC, in a we can capitalize a and c and delete the remaining lowercase letters to match b. Matching is not possible when a letter would have to change, because letters may only be capitalized or discarded, not changed.
Function Description
Complete the function in the editor below. It must return either or .
abbreviation has the following parameter(s):
a: the string to modify
b: the string to match
Input Format
The first line contains a single integer , the number of queries.
Each of the next pairs of lines is as follows:
- The first line of each query contains a single string, .
- The second line of each query contains a single string, .
Constraints
String consists only of uppercase and lowercase English letters, ascii[A-Za-z].
String consists only of uppercase English letters, ascii[A-Z].
Output Format
For each query, print YES on a new line if it's possible to make string a equal to string b. Otherwise, print NO.
Sample Input
1
daBcd
ABC
Sample Output
YES
Explanation
image
We have daBcd and ABC. We perform the following operation:
Capitalize the letters a and c in a so that a = dABCd.
Delete all the remaining lowercase letters in a so that a = ABC.
Because we were able to successfully convert a to b, we print YES on a new line.
"""
#!/bin/python3
import math
import os
import random
import re
import sys
def abbreviation(a, b):
    """
    Decide whether string a can be turned into string b by capitalizing
    some of a's lowercase letters and deleting the remaining lowercase
    letters.

    dp[i][j] is True iff b[:i] can be formed from a[:j].

    Fix over the original: the base row (i == 0, i.e. b empty) is computed
    explicitly instead of relying on Python's negative indexing
    (dp[-1] / b[-1]), which only happened to work because b is all
    uppercase per the problem constraints.

    a: the string to modify (upper- and lowercase letters)
    b: the string to match (uppercase letters)
    Returns 'YES' or 'NO'.
    """
    m, n = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    # base row: b empty -> a[:j] must consist of deletable (lowercase) letters
    for j in range(1, m + 1):
        dp[0][j] = dp[0][j - 1] and a[j - 1].islower()
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if a[j - 1] == b[i - 1]:
                # exact (uppercase) match: must consume both characters
                dp[i][j] = dp[i - 1][j - 1]
            elif a[j - 1].upper() == b[i - 1]:
                # lowercase that matches when capitalized: capitalize or delete
                dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]
            elif a[j - 1].islower():
                # non-matching lowercase: can only be deleted
                dp[i][j] = dp[i][j - 1]
            # non-matching uppercase: dp[i][j] stays False
    return "YES" if dp[n][m] else "NO"
if __name__ == '__main__':
    # Read the queries from stdin and write one YES/NO verdict per query
    # to the output path the grader supplies via OUTPUT_PATH.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        num_queries = int(input())
        for _ in range(num_queries):
            first = input()
            second = input()
            fptr.write(abbreviation(first, second) + '\n')
|
flexible
|
{
"blob_id": "5fb998fa761b989c6dd423634824197bade4f8a5",
"index": 23,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n q = int(input())\n for q_itr in range(q):\n a = input()\n b = input()\n result = abbreviation(a, b)\n fptr.write(result + '\\n')\n fptr.close()\n",
"step-4": "<mask token>\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n q = int(input())\n for q_itr in range(q):\n a = input()\n b = input()\n result = abbreviation(a, b)\n fptr.write(result + '\\n')\n fptr.close()\n",
"step-5": "\"\"\"\nYou can perform the following operations on the string, :\n\nCapitalize zero or more of 's lowercase letters.\nDelete all of the remaining lowercase letters in .\nGiven two strings, and , determine if it's possible to make equal to as described. If so, print YES on a new line. Otherwise, print NO.\n\nFor example, given and , in we can convert and delete to match . If and , matching is not possible because letters may only be capitalized or discarded, not changed.\n\nFunction Description\n\nComplete the function in the editor below. It must return either or .\n\nabbreviation has the following parameter(s):\n\na: the string to modify\nb: the string to match\nInput Format\n\nThe first line contains a single integer , the number of queries.\n\nEach of the next pairs of lines is as follows:\n- The first line of each query contains a single string, .\n- The second line of each query contains a single string, .\n\nConstraints\n\nString consists only of uppercase and lowercase English letters, ascii[A-Za-z].\nString consists only of uppercase English letters, ascii[A-Z].\nOutput Format\n\nFor each query, print YES on a new line if it's possible to make string equal to string . Otherwise, print NO.\n\nSample Input\n\n1\ndaBcd\nABC\nSample Output\n\nYES\nExplanation\n\nimage\n\nWe have daBcd and ABC. 
We perform the following operation:\n\nCapitalize the letters a and c in so that dABCd.\nDelete all the remaining lowercase letters in so that ABC.\nBecause we were able to successfully convert to , we print YES on a new line.\n\n\n\"\"\"\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Complete the abbreviation function below.\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [[False]*(m+1) for _ in range(n+1)]\n dp[0][0] = True\n for i in range(n+1):\n for j in range(1,m+1):\n if a[j-1] == b[i-1]:\n dp[i][j] = dp[i-1][j-1]\n elif a[j-1].upper() == b[i-1]:\n dp[i][j] = dp[i-1][j-1] or dp[i][j-1]\n elif a[j-1].islower():\n dp[i][j] = dp[i][j-1]\n return \"YES\" if dp[n][m] else \"NO\"\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n a = input()\n\n b = input()\n\n result = abbreviation(a, b)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
"""
Script that generates the photon efficiency curves and stores them in a root
file.
For the moment only the pT curves for the different eta bins are created
"""
import re
import json
import ROOT as r
r.PyConfig.IgnoreCommandLineOptions = True
import numpy as np
import sympy as sp
from utils.symbolic import func_cov
from utils.graph_utils import get_lower_band, get_upper_band
from common_func import get_name
# Covariance matrix from the fit integrated over the whole eta range, where
# alpha and beta were fixed. This will be used to calculate the correlation
# coefficients between the fitted parameters, which will then be used to get
# the uncertainty bands for the parametrization
# NOTE(review): rows/columns presumably correspond to the free parameters
# p0..p3 in index order (the order get_cov_func sorts them into) — confirm
# against the fit that produced these numbers
COVARIANCE = np.array([
    [1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06],
    [1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06],
    [-4.328e-06, -1.714e-05, 4.228e-05, -1.481e-05],
    [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05],
])
# corr = diag(cov)^{-1/2} * cov * diag(cov)^{-1/2}
# i.e. normalize the covariance by the standard deviations to obtain the
# correlation matrix (unit diagonal)
CORRELATIONS = np.matmul(
    np.matmul(
        np.diag(1/np.sqrt(np.diag(COVARIANCE))), COVARIANCE,
    ), np.diag(1/np.sqrt(np.diag(COVARIANCE)))
)
def eff_param_string():
    """
    The parametrization of the efficiencies from AN-2015-11 as a string
    suitable for a ROOT.TF1 constructor.

    p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))
    """
    formula = ('[0] * (1 - [1] * (TMath::Erf(x[0] + [2])'
               ' - [1] / [4] * (x[0] - [3] * (pow(x[0], 2)'
               ' - [3] / [5] * pow(x[0], 3)))))')
    return formula
def eff_param():
    """
    Build the efficiency parametrization as a ROOT.TF1 on pT in [0, 7].
    """
    func = r.TF1('photon_eff_param', eff_param_string(), 0, 7)
    return func
def eff_param_sym():
    """
    Return the efficiency parametrization as a sympy symbolic expression.

    Obtained by massaging the TF1 formula string into something that
    sympy.sympify understands.
    """
    expr = eff_param_string()
    # ROOT's erf -> sympy's erf, and give the kinematic variable a
    # parseable name
    expr = expr.replace('TMath::Erf', 'erf').replace('x[0]', 'x')
    # parameter notation [x] -> px
    expr = re.sub(r'\[([0-9])\]', r'p\1', expr)
    # pow(x, y) -> pythonic x**y
    expr = re.sub(r'pow\((.*?)\s*?,\s*?([0-9])\)', r'\1**\2', expr)
    return sp.sympify(expr)
def get_corr_subs_values(corr):
    """
    Get the dictionary of substitution values for the correlation matrix.

    Maps the symbol names 'rho_p{i}p{j}' (i < j, upper triangle only, as
    consumed by the substitution in get_cov_func) onto the corresponding
    entries of corr.

    Args:
        corr: square (symmetric) correlation matrix.

    Returns:
        dict mapping 'rho_p{i}p{j}' -> corr[i, j] for all i < j.
    """
    subs_dict = {}
    n_dim = corr.shape[0]
    # range instead of py2-only xrange: identical behavior, py2/py3 compatible
    for irow in range(0, n_dim):
        for icol in range(irow + 1, n_dim):
            subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]
    return subs_dict
def get_cov_func(params, corr):
    """
    Get the uncertainty (variance) function where only pT is left as a free
    parameter.

    Args:
        params: dict mapping parameter name -> (value, uncertainty) pair.
            Parameters with uncertainty == 0 are treated as fixed and do
            not contribute to the variance.
        corr: correlation matrix of the free parameters.

    Returns:
        A python function of pT that can be evaluated at any given point.
    """
    eff = eff_param_sym()
    # collect the non-fixed parameters that actually appear in the
    # parametrization
    free_params = []
    for sym in eff.free_symbols:
        if sym.name in params and params[sym.name][1] != 0:
            free_params.append(sym)
    # sort the parameters according to their name, such that the correlation
    # coefficients (rho_pipj with i < j) actually match the right pairs
    free_params.sort(key=lambda s: int(s.name.replace('p', '')))
    cov_eff = func_cov(eff, free_params)
    # build up the dictionary of symbol -> value that will be substituted.
    # In the end the returned function will only have one free parameter left.
    # .items() instead of py2-only .iteritems() keeps this py2/py3 compatible
    subst_vals = {
        p: v[0] for p, v in params.items()
    }
    subst_vals.update({
        'sigma_' + p: v[1] for p, v in params.items()
    })
    subst_vals.update(
        get_corr_subs_values(corr)
    )
    # NOTE: here it is assumed that 'x' is the only free parameter left
    return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))
def get_graph_err(params, corr, n_sigma=1.0, n_points=100):
    """
    Get the function evaluated at n_points with uncertainties taking into
    account correlations between the parameters.

    Args:
        params: dict mapping parameter name -> (value, uncertainty) pair.
        corr: correlation matrix of the free parameters.
        n_sigma: scale factor applied to the uncertainties.
        n_points: number of evaluation points between pT = 0.4 and 7.

    Returns:
        ROOT.TGraphErrors holding central values and scaled uncertainties.
    """
    # central function with all parameter values substituted in
    eff_f = eff_param_sym()
    # .items() instead of py2-only .iteritems(): py2/py3 compatible
    eff_f = eff_f.subs({p: v[0] for p, v in params.items()})
    # NOTE: assume that 'x' is the only free parameter left
    eff_f = sp.lambdify(sp.symbols('x'), eff_f)
    # uncertainty (variance) function as function of pT
    var_f = get_cov_func(params, corr)

    x_bins = np.linspace(0.4, 7, n_points + 1)
    x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])  # bin centers
    x_err = np.diff(x_bins)  # "uncertainties" in x (bin widths)

    y_cent = np.array([eff_f(x) for x in x_cent])
    # sigma = sqrt(variance), scaled by the requested number of sigmas
    y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma

    return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)
def set_params_errors(func, *params):
    """
    Set all the parameters of func as (value, uncertainty) pairs, in the
    order they appear in the params list. A parameter with uncertainty == 0
    is fixed to its central value.
    """
    values = np.array([pair[0] for pair in params])
    errors = np.array([pair[1] for pair in params])
    func.SetParameters(values)
    func.SetParErrors(errors)
    # fix every parameter whose uncertainty is exactly zero
    for ipar, sigma in enumerate(errors):
        if sigma == 0:
            func.FixParameter(ipar, func.GetParameter(ipar))
def load_params(param_file):
    """
    Load the json parameter file and return the list of dicts stored in it.
    """
    with open(param_file, 'r') as pfile:
        return json.load(pfile)
def create_param(params, sigma_shift, uncorrelated):
    """
    Create the function from the passed params and give it an appropriate name.

    Args:
        params: dict of (value, uncertainty) pairs keyed by parameter name,
            plus the 'eta' bin information used for naming.
        sigma_shift: number of sigmas by which the curve is shifted.
            0 returns the exact parametrization as a TF1; any other value
            returns a TGraph approximation of the shifted band edge.
        uncorrelated: if True, treat the free parameters as uncorrelated
            instead of using the global CORRELATIONS matrix.

    Returns:
        ROOT.TF1 (sigma_shift == 0) or ROOT.TGraph (otherwise).
    """
    # if the central results are desired, use the exact parametrization as TF1
    if sigma_shift == 0:
        func = eff_param()
        set_params_errors(func, params["p0"], params["p1"], params["p2"],
                          params["p3"], params["alpha"], params["beta"])
        func.SetName(get_name(params["eta"], 'photon_eff_pt'))
        return func

    # else get an approximation by evaluating the function at a given number of
    # points and determining the uncertainties at these points, then store the
    # points as a TGraph where the y-values are the central + uncertainty
    # values at each evaluation point.
    # NOTE: Since eff_param_sym names alpha and beta p4 and p5 respectively
    # (can't use beta in an expression that goes through sympy.sympify), we
    # alias them here. Fix over the original: work on a copy so the caller's
    # dict is not mutated. The original alpha/beta keys are harmless since
    # they are never picked up by the substitution.
    params = dict(params)
    params['p4'] = params['alpha']
    params['p5'] = params['beta']

    # identity matrix <=> uncorrelated parameters
    corr = np.identity(4) if uncorrelated else CORRELATIONS
    graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)
    if sigma_shift < 0:
        graph = get_lower_band(graph)
    else:
        graph = get_upper_band(graph)

    graph.SetName(get_name(params['eta'], 'photon_eff_pt'))
    return graph
def main(args):
    """Main: write one parametrization per eta bin to the output root file."""
    # either extend an existing file or start from scratch
    mode = 'update' if args.update else 'recreate'
    outfile = r.TFile.Open(args.outfile, mode)
    for par_set in load_params(args.paramfile):
        curve = create_param(par_set, args.sigma, args.uncorrelated)
        curve.Write()
    outfile.Close()
if __name__ == '__main__':
    import argparse
    # command line interface: one positional parameter file plus options
    parser = argparse.ArgumentParser(
        description='script to generate TF1 photon efficiency '
        'parametrizations from json file holding the fit parameters')
    parser.add_argument('paramfile',
                        help='json file containing the fitted parameters')
    parser.add_argument('-o', '--outfile', default='photon_effs_param.root',
                        help='root file into which the TF1 should be stored')
    parser.add_argument('-u', '--update', action='store_true', default=False,
                        help='update the output file instead of recreating it')
    parser.add_argument('-s', '--sigma', type=float, default=0,
                        help='Use the central value + [sigma] * uncertainty '
                        'for each parameter')
    parser.add_argument('--uncorrelated', action='store_true', default=False,
                        help='Assume that the free parameters are '
                        'uncorrelated instead of using correlation '
                        'parameters from a global fit')

    clargs = parser.parse_args()
    main(clargs)
|
normal
|
{
"blob_id": "fd450b5454b65ed69b411028788c587f9674760c",
"index": 966,
"step-1": "<mask token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<mask token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<mask token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<mask token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = 
eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = 
load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return 
sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = 
get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<mask token>\n",
"step-4": "<mask token>\nr.PyConfig.IgnoreCommandLineOptions = True\n<mask token>\nCOVARIANCE = np.array([[1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06], [\n 1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06], [-4.328e-06, -1.714e-05, \n 4.228e-05, -1.481e-05], [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05]])\nCORRELATIONS = np.matmul(np.matmul(np.diag(1 / np.sqrt(np.diag(COVARIANCE))\n ), COVARIANCE), np.diag(1 / np.sqrt(np.diag(COVARIANCE))))\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n 
free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'script to generate TF1 photon efficiency parametrizations from json file holding the fit parameters'\n )\n parser.add_argument('paramfile', help=\n 'json file containing the fitted parameters')\n parser.add_argument('-o', '--outfile', help=\n 'root file into which the TF1 should be stored', default=\n 'photon_effs_param.root')\n 
parser.add_argument('-u', '--update', help=\n 'update the output file instead of recreating it', default=False,\n action='store_true')\n parser.add_argument('-s', '--sigma', help=\n 'Use the central value + [sigma] * uncertainty for each parameter',\n type=float, default=0)\n parser.add_argument('--uncorrelated', default=False, action=\n 'store_true', help=\n 'Assume that the free parameters are uncorrelated instead of using correlation parameters from a global fit'\n )\n clargs = parser.parse_args()\n main(clargs)\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nScript that generates the photon efficiency curves and stores them in a root\nfile.\n\nFor the moment only the pT curves for the different eta bins are created\n\"\"\"\n\nimport re\nimport json\nimport ROOT as r\nr.PyConfig.IgnoreCommandLineOptions = True\n\nimport numpy as np\nimport sympy as sp\n\nfrom utils.symbolic import func_cov\nfrom utils.graph_utils import get_lower_band, get_upper_band\n\nfrom common_func import get_name\n\n# Covariance matrix from the fit integrated over the whole eta range, where\n# alpha and beta were fixed. This will be used to calculate the correlation\n# coefficients between the fitted parameters, which will then be used to get\n# the uncertainty bands for the parametrization\nCOVARIANCE = np.array([\n [1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06],\n [1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06],\n [-4.328e-06, -1.714e-05, 4.228e-05, -1.481e-05],\n [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05],\n])\n\n# corr = diag(cov)^{-1/2} * cov * diag(cov)^{-1/2}\nCORRELATIONS = np.matmul(\n np.matmul(\n np.diag(1/np.sqrt(np.diag(COVARIANCE))), COVARIANCE,\n ), np.diag(1/np.sqrt(np.diag(COVARIANCE)))\n)\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n # replace call to ROOTs erf and give x[0] a parseable name\n param_str = 
param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n # convert parameters from [x] notation to px notation\n param_str = re.sub(r'\\[([0-9])\\]', r'p\\1', param_str)\n # replace pow(x, y) with x**y (pythonic) syntax\n param_str = re.sub(r'pow\\((.*?)\\s*?,\\s*?([0-9])\\)', r'\\1**\\2', param_str)\n\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n # get the list of free parameters\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n\n # sort the parameters according to their name, such that the correlation\n # coefficients actually match\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n\n cov_eff = func_cov(eff, free_params)\n\n # build up the dictionary of symbol -> value that will be substituted.\n # In the end the returned function will only have one free parameter left\n subst_vals = {\n p: v[0] for p, v in params.iteritems()\n }\n subst_vals.update({\n 'sigma_' + p: v[1] for p, v in params.iteritems()\n })\n subst_vals.update(\n get_corr_subs_values(corr)\n )\n\n # NOTE: here it is assumed that 'x' is the only free parameter left\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n # central function\n eff_f = 
eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n # NOTE: assume that 'x' is the only free parameter left\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n\n # uncertainty function (as function of pT)\n var_f = get_cov_func(params, corr)\n\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1]) # bin centers\n x_err = np.diff(x_bins) # \"uncertainties\" in x\n\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n\n func.SetParameters(central)\n func.SetParErrors(uncer)\n\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n # if the central results are desired. 
Use the exact parametrization as TF1\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params[\"p0\"], params[\"p1\"], params[\"p2\"],\n params[\"p3\"], params[\"alpha\"], params[\"beta\"])\n\n func.SetName(get_name(params[\"eta\"], 'photon_eff_pt'))\n return func\n\n # else get an aproximation by evaluating the function at a given number of\n # points and determine the uncertainties at these points, then store the\n # points as a TGraph where the y-values are the central + uncertainty values\n # at each evaluation point\n\n # NOTE: Since eff_param_sym names alpha and beta p4 and p5 respectively\n # (can't use beta in an expression that goes through sympy.sympify), we have\n # to clone them here. We can leave the original values in, since they will\n # not be picked up by the substitution command\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n\n # use the global correlation matrix or an identity matrix if uncorrelated\n # parameters are desired\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='script to generate TF1 '\n 'photon efficiency parametrizations from '\n 'json file holding the fit parameters')\n parser.add_argument('paramfile', help='json file containing the fitted '\n 'parameters')\n parser.add_argument('-o', '--outfile', help='root file into which the TF1 '\n 'should be 
stored', default='photon_effs_param.root')\n parser.add_argument('-u', '--update', help='update the output file instead '\n 'of recreating it', default=False, action='store_true')\n parser.add_argument('-s', '--sigma', help='Use the central value + [sigma] '\n '* uncertainty for each parameter', type=float,\n default=0)\n parser.add_argument('--uncorrelated', default=False, action='store_true',\n help='Assume that the free parameters are uncorrelated '\n 'instead of using correlation parameters from a global '\n 'fit')\n\n clargs = parser.parse_args()\n main(clargs)\n",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
# Smoke-test script: print a greeting to stdout.
print("Hello world! im in github")
|
normal
|
{
"blob_id": "2db6f88b733c23063803c374d7a5b651e8443bd5",
"index": 6135,
"step-1": "<mask token>\n",
"step-2": "print('Hello world! im in github')\n",
"step-3": "print(\"Hello world! im in github\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Extend this package's module search path with the CMSSW-provided
# ggAnalysis directory so its modules can be imported as part of this
# package. NOTE(review): path is release-specific (CMSSW_7_0_6_patch3);
# confirm it matches the deployed release.
__path__.append(
    '/cvmfs/cms.cern.ch/slc6_amd64_gcc481/cms/cmssw-patch/CMSSW_7_0_6_patch3/python/ggAnalysis'
    )
|
flexible
|
{
"blob_id": "0345c3c2049c972370cd7bde5a6e0a1dfa5dfe66",
"index": 3719,
"step-1": "<mask token>\n",
"step-2": "__path__.append(\n '/cvmfs/cms.cern.ch/slc6_amd64_gcc481/cms/cmssw-patch/CMSSW_7_0_6_patch3/python/ggAnalysis'\n )\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def calculateFuel(weight):
return weight // 3 - 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculateWeight(weight):
fuel = calculateFuel(weight)
if fuel > 0:
sum = fuel + calculateWeight(fuel)
return sum
else:
return max(0, fuel)
def calculateFuel(weight):
return weight // 3 - 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculateWeight(weight):
fuel = calculateFuel(weight)
if fuel > 0:
sum = fuel + calculateWeight(fuel)
return sum
else:
return max(0, fuel)
def calculateFuel(weight):
return weight // 3 - 2
for line in source.readlines():
total += calculateWeight(int(line))
print(total)
<|reserved_special_token_1|>
source = open('input.txt', 'r')
total = 0
def calculateWeight(weight):
fuel = calculateFuel(weight)
if fuel > 0:
sum = fuel + calculateWeight(fuel)
return sum
else:
return max(0, fuel)
def calculateFuel(weight):
return weight // 3 - 2
for line in source.readlines():
total += calculateWeight(int(line))
print(total)
<|reserved_special_token_1|>
# Read module masses from the puzzle input (one integer per line).
# NOTE(review): the file handle is never closed; relies on interpreter
# cleanup at script exit.
source = open("input.txt", "r")
total = 0
def calculateWeight( weight ):
    """Total fuel for a mass, including the fuel needed to lift the fuel.

    Applies calculateFuel repeatedly until the requirement drops to zero
    or below, summing every positive step.
    """
    step = calculateFuel(weight)
    if step <= 0:
        # Negative requirements count as zero.
        return max(0, step)
    return step + calculateWeight(step)


def calculateFuel ( weight ):
    """Fuel to lift a single mass: floor(mass / 3) - 2."""
    return weight // 3 - 2
# Sum the fuel requirement over every module and report the total.
for line in source.readlines():
    total += calculateWeight(int(line))
print(total)
|
flexible
|
{
"blob_id": "bea1a5bc9c92d095a2f187a4c06d18d0a939f233",
"index": 3376,
"step-1": "<mask token>\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\nprint(total)\n",
"step-4": "source = open('input.txt', 'r')\ntotal = 0\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\nprint(total)\n",
"step-5": "source = open(\"input.txt\", \"r\")\ntotal = 0\n\ndef calculateWeight( weight ):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel ( weight ):\n return weight // 3 -2\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\n\nprint(total)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MovementNullifier:
    def __init__(self):
        """Set up ROS I/O and the state used to cancel residual drift.

        Subscribes to odometry and to the velocity command topic, and
        publishes small corrective velocities on 'cmd_vel' whenever the
        robot creeps away from the pose it had when it came to rest.
        """
        rospy.Subscriber('odom', Odometry, self.OdomCallback)
        rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
        self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
            queue_size=10)
        # Reference-pose bookkeeping
        self.first = True            # no reference pose latched yet
        self.start_yaw = 0           # yaw (rad) captured when the robot stopped
        self.threshold = 0.01        # NOTE(review): not used in the visible code
        self.distance = 0.0          # |x - start_x| drift since stopping
        self.prev_distance = 0.0     # previous drift, to detect overshoot
        self.angle = 0.0             # |yaw - start_yaw| drift in degrees
        # Correction state
        self.turn = False            # currently correcting heading
        self.move = False            # currently correcting position
        self.cruise_velocity = 0.01  # magnitude of the corrective commands
        self.velocity = 0            # signed angular correction velocity
        self.lin_velocity = 0        # signed linear correction velocity
        # External command tracking
        self.cmd_is_commanding = False   # another node is driving the robot
        self.twist_time = rospy.Time.now()  # stamp of the last cmd_vel seen
        # Services to pause/resume the nullifier
        self.stop_service = rospy.Service('stop_nullify', Empty, self.
            StopListening)
        self.start_service = rospy.Service('start_nullify', Empty, self.
            StartListening)
        self.keep_running = True     # False while paused via 'stop_nullify'
    def StopListening(self, data):
        """Service handler for 'stop_nullify': pause drift correction."""
        self.keep_running = False
        return EmptyResponse()
<|reserved_special_token_0|>
def Turn(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
<|reserved_special_token_0|>
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding:
pose = data.pose
quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
pose.pose.orientation.z, pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
if self.move and self.distance > self.prev_distance:
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
self.first = True
self.angle = 0.0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MovementNullifier:
def __init__(self):
rospy.Subscriber('odom', Odometry, self.OdomCallback)
rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
queue_size=10)
self.first = True
self.start_yaw = 0
self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service('stop_nullify', Empty, self.
StopListening)
self.start_service = rospy.Service('start_nullify', Empty, self.
StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
<|reserved_special_token_0|>
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding:
pose = data.pose
quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
pose.pose.orientation.z, pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
if self.move and self.distance > self.prev_distance:
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
self.first = True
self.angle = 0.0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MovementNullifier:
def __init__(self):
rospy.Subscriber('odom', Odometry, self.OdomCallback)
rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
queue_size=10)
self.first = True
self.start_yaw = 0
self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service('stop_nullify', Empty, self.
StopListening)
self.start_service = rospy.Service('start_nullify', Empty, self.
StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Move(self):
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.lin_velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding:
pose = data.pose
quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
pose.pose.orientation.z, pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
if self.move and self.distance > self.prev_distance:
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
self.first = True
self.angle = 0.0
if __name__ == '__main__':
rospy.init_node('keep_yaw')
movement_nullifier = MovementNullifier()
rospy.spin()
<|reserved_special_token_1|>
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty, EmptyResponse
import tf
from math import radians, degrees, fabs
class MovementNullifier:
    """Cancels residual base drift by publishing small counter-velocities.

    Subscribes to ``odom`` and ``cmd_vel`` and publishes on ``cmd_vel``.
    While no external command is active, it latches a reference yaw and x
    position from odometry and pushes the robot back toward that reference
    with low-magnitude Twist messages (``cruise_velocity``). Two services,
    ``stop_nullify`` and ``start_nullify``, pause and resume the behavior.
    """
    def __init__(self):
        # Wire up I/O: odometry drives the correction loop, cmd_vel is
        # monitored to detect when some other node is commanding the base.
        rospy.Subscriber('odom', Odometry, self.OdomCallback)
        rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
        self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
            queue_size=10)
        # True until a reference pose has been latched in OdomCallback.
        self.first = True
        self.start_yaw = 0
        # NOTE(review): threshold appears unused; thresholds below are
        # hard-coded (0.5 / 0.01 / 0.001) — confirm before removing.
        self.threshold = 0.01
        # |x - start_x| in meters; prev_distance detects growing error.
        self.distance = 0.0
        self.prev_distance = 0.0
        # |yaw - start_yaw| in degrees.
        self.angle = 0.0
        # State flags: whether a rotational / linear correction is active.
        self.turn = False
        self.move = False
        # Magnitude of every corrective command (rad/s and m/s).
        self.cruise_velocity = 0.01
        self.velocity = 0
        self.lin_velocity = 0
        # True while an external node is driving the base (see TwistCallback).
        self.cmd_is_commanding = False
        self.twist_time = rospy.Time.now()
        self.stop_service = rospy.Service('stop_nullify', Empty, self.
            StopListening)
        self.start_service = rospy.Service('start_nullify', Empty, self.
            StartListening)
        # Master enable; OdomCallback returns immediately when False.
        self.keep_running = True
    def StopListening(self, data):
        """Service handler: disable drift correction."""
        self.keep_running = False
        return EmptyResponse()
    def StartListening(self, data):
        """Service handler: re-enable correction and reset the state machine.

        Setting ``first`` forces a fresh reference pose to be latched on the
        next odometry message.
        """
        self.keep_running = True
        self.turn = False
        self.move = False
        self.cmd_is_commanding = False
        self.first = True
        return EmptyResponse()
    def Turn(self):
        """Publish a pure angular correction at the current velocity."""
        cmd_vel_msg = Twist()
        cmd_vel_msg.angular.z = self.velocity
        self.cmd_vel_publisher.publish(cmd_vel_msg)
    def Move(self):
        """Publish a pure linear-x correction at the current velocity."""
        cmd_vel_msg = Twist()
        cmd_vel_msg.linear.x = self.lin_velocity
        self.cmd_vel_publisher.publish(cmd_vel_msg)
    def Zero(self):
        """Publish an all-zero Twist to stop any active correction."""
        cmd_vel_msg = Twist()
        cmd_vel_msg.angular.z = 0
        cmd_vel_msg.linear.x = 0
        self.cmd_vel_publisher.publish(cmd_vel_msg)
    def TwistCallback(self, data):
        """Classify incoming cmd_vel traffic as external or our own.

        Anything faster than cruise_velocity + eps cannot be one of this
        node's own corrections, so it must be an external command; eps
        absorbs float noise around our own published magnitude.
        """
        self.twist_time = rospy.Time.now()
        eps = 0.002
        if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
            linear.x) > self.cruise_velocity + eps:
            self.cmd_is_commanding = True
        else:
            self.cmd_is_commanding = False
    def OdomCallback(self, data):
        """Core loop: latch a reference pose and drive drift back to it.

        Runs on every odometry message. While an external command is active
        the state machine resets; otherwise it engages turn/move corrections
        based on degree/meter error thresholds with hysteresis.
        """
        if not self.keep_running:
            return
        twist = data.twist
        # If no cmd_vel has arrived for 0.5 s, assume nobody is commanding.
        if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
            self.cmd_is_commanding = False
        if not self.cmd_is_commanding:
            pose = data.pose
            quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
                pose.pose.orientation.z, pose.pose.orientation.w)
            euler = tf.transformations.euler_from_quaternion(quaternion)
            yaw = euler[2]
            x_position = pose.pose.position.x
            # Keep publishing whichever correction is currently engaged.
            if self.turn:
                self.Turn()
            if self.move:
                self.Move()
            if self.first:
                # Latch the reference pose we will hold from now on.
                self.start_yaw = euler[2]
                self.start_x = x_position
                self.first = False
                self.turn = False
                self.prev_time = data.header.stamp
                self.Zero()
            else:
                # Errors relative to the latched reference (deg / m).
                self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
                self.distance = fabs(self.start_x - x_position)
                # Engage rotation once yaw error reaches 0.5 deg; sign of
                # the correction opposes the drift direction.
                if self.angle >= 0.5:
                    self.turn = True
                    if self.start_yaw > yaw:
                        self.velocity = self.cruise_velocity
                    else:
                        self.velocity = -self.cruise_velocity
                # Disengage once back within 0.01 deg (hysteresis band).
                if self.turn and self.angle < 0.01:
                    self.turn = False
                    self.Zero()
                # Disengage translation once within 1 mm of the reference.
                if self.move and self.distance < 0.001:
                    self.move = False
                    self.Zero()
                # Error growing while correcting means we overshot — stop.
                if self.move and self.distance > self.prev_distance:
                    self.move = False
                    self.Zero()
                # Engage translation once x error reaches 1 cm.
                if self.distance >= 0.01:
                    self.move = True
                    if self.start_x > x_position:
                        self.lin_velocity = self.cruise_velocity
                    else:
                        self.lin_velocity = -self.cruise_velocity
                    self.prev_distance = self.distance
        else:
            # External command in progress: re-latch reference when it ends.
            self.first = True
            self.angle = 0.0
if __name__ == '__main__':
    # Start the node and hand control to the rospy callback loop; all work
    # happens inside MovementNullifier's subscriber/service callbacks.
    rospy.init_node('keep_yaw')
    movement_nullifier = MovementNullifier()
    rospy.spin()
<|reserved_special_token_1|>
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty, EmptyResponse
import tf
from math import radians, degrees, fabs
class MovementNullifier:
    """Cancels residual base drift by publishing small counter-velocities.

    Subscribes to ``odom`` and ``cmd_vel`` and publishes on ``cmd_vel``.
    While no external command is active, it latches a reference yaw and x
    position from odometry and pushes the robot back toward that reference
    with low-magnitude Twist messages (``cruise_velocity``). Two services,
    ``stop_nullify`` and ``start_nullify``, pause and resume the behavior.
    """
    def __init__(self):
        # Wire up I/O: odometry drives the correction loop, cmd_vel is
        # monitored to detect when some other node is commanding the base.
        rospy.Subscriber("odom", Odometry, self.OdomCallback)
        rospy.Subscriber("cmd_vel", Twist, self.TwistCallback)
        self.cmd_vel_publisher = rospy.Publisher("cmd_vel", Twist, queue_size=10)
        # True until a reference pose has been latched in OdomCallback.
        self.first = True
        self.start_yaw = 0
        # NOTE(review): threshold appears unused; thresholds below are
        # hard-coded (0.5 / 0.01 / 0.001) — confirm before removing.
        self.threshold = 0.01;
        # |x - start_x| in meters; prev_distance detects growing error.
        self.distance = 0.0
        self.prev_distance = 0.0
        # |yaw - start_yaw| in degrees.
        self.angle = 0.0
        # State flags: whether a rotational / linear correction is active.
        self.turn = False
        self.move = False
        # Magnitude of every corrective command (rad/s and m/s).
        self.cruise_velocity = 0.01
        self.velocity = 0
        self.lin_velocity = 0
        # True while an external node is driving the base (see TwistCallback).
        self.cmd_is_commanding = False
        self.twist_time = rospy.Time.now()
        self.stop_service = rospy.Service("stop_nullify", Empty, self.StopListening)
        self.start_service = rospy.Service("start_nullify", Empty, self.StartListening)
        # Master enable; OdomCallback returns immediately when False.
        self.keep_running = True
    def StopListening(self, data):
        """Service handler: disable drift correction."""
        self.keep_running = False
        return EmptyResponse()
    def StartListening(self, data):
        """Service handler: re-enable correction and reset the state machine."""
        self.keep_running = True
        #self.Zero()
        self.turn = False
        self.move = False
        self.cmd_is_commanding = False
        # Force a fresh reference pose on the next odometry message.
        self.first = True
        return EmptyResponse()
    def Turn(self):
        """Publish a pure angular correction at the current velocity."""
        #print "Turning with velocity: %f" % (self.velocity)
        cmd_vel_msg = Twist()
        cmd_vel_msg.angular.z = self.velocity
        self.cmd_vel_publisher.publish(cmd_vel_msg)
    def Move(self):
        """Publish a pure linear-x correction at the current velocity."""
        cmd_vel_msg = Twist()
        cmd_vel_msg.linear.x = self.lin_velocity
        self.cmd_vel_publisher.publish(cmd_vel_msg)
    def Zero(self):
        """Publish an all-zero Twist to stop any active correction."""
        cmd_vel_msg = Twist()
        cmd_vel_msg.angular.z = 0
        cmd_vel_msg.linear.x = 0
        self.cmd_vel_publisher.publish(cmd_vel_msg)
    def TwistCallback(self, data):
        """Classify incoming cmd_vel traffic as external or our own.

        Anything faster than cruise_velocity + eps cannot be one of this
        node's own corrections, so it must be an external command; eps
        absorbs float noise around our own published magnitude.
        """
        self.twist_time = rospy.Time.now()
        eps = 0.002
        if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.linear.x) > self.cruise_velocity + eps:
            self.cmd_is_commanding = True
        else:
            self.cmd_is_commanding = False
    def OdomCallback(self, data):
        """Core loop: latch a reference pose and drive drift back to it.

        Runs on every odometry message. While an external command is active
        the state machine resets; otherwise it engages turn/move corrections
        based on degree/meter error thresholds with hysteresis.
        """
        if not self.keep_running:
            return
        twist = data.twist
        # If no cmd_vel has arrived for 0.5 s, assume nobody is commanding.
        if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
            self.cmd_is_commanding = False
        if not self.cmd_is_commanding: # lets counter react movement
            pose = data.pose
            quaternion = (pose.pose.orientation.x,
                          pose.pose.orientation.y,
                          pose.pose.orientation.z,
                          pose.pose.orientation.w)
            euler = tf.transformations.euler_from_quaternion(quaternion)
            yaw = euler[2]
            x_position = pose.pose.position.x
            #print "Yaw: %f deg, Position x: %f" % (degrees(euler[2]), pose.pose.position.x)
            #print "Turn: %r, Move: %r, First: %r" % (self.turn, self.move, self.first)
            # Keep publishing whichever correction is currently engaged.
            if self.turn:
                self.Turn()
            if self.move:
                self.Move()
            if self.first:
                # Latch the reference pose we will hold from now on.
                self.start_yaw = euler[2]
                self.start_x = x_position
                self.first = False
                self.turn = False
                self.prev_time = data.header.stamp
                self.Zero()
                #print "Start yaw: %f" % (self.start_yaw)
                #print "Start x: %f" % (self.start_x)
            else:
                # Errors relative to the latched reference (deg / m).
                self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
                self.distance = fabs(self.start_x - x_position)
                #print "Distance %f, prev distance: %f" % (self.distance, self.prev_distance)
                # Engage rotation once yaw error reaches 0.5 deg; sign of
                # the correction opposes the drift direction.
                if self.angle >= 0.5:
                    self.turn = True
                    if self.start_yaw > yaw:
                        self.velocity = self.cruise_velocity
                    else:
                        self.velocity = -self.cruise_velocity
                #print "Angle: %f" % self.angle
                # Disengage once back within 0.01 deg (hysteresis band).
                if self.turn and self.angle < 0.01:
                    self.turn = False
                    self.Zero()
                    #print "Yaw: start %f, new %f" % (self.start_yaw, yaw)
                # Disengage translation once within 1 mm of the reference.
                if self.move and self.distance < 0.001:
                    self.move = False
                    self.Zero()
                    #print "Position: start %f, new %f" % (self.start_x, x_position)
                # Error growing while correcting means we overshot — stop.
                if self.move and (self.distance > self.prev_distance):
                    self.move = False
                    self.Zero()
                # Engage translation once x error reaches 1 cm.
                if self.distance >= 0.01:
                    self.move = True
                    if self.start_x > x_position:
                        self.lin_velocity = self.cruise_velocity
                    else:
                        self.lin_velocity = -self.cruise_velocity
                    self.prev_distance = self.distance
        else:
            #print 'Resetting...'
            # External command in progress: re-latch reference when it ends.
            self.first = True
            self.angle = 0.0
if __name__ == "__main__":
    # Start the node and hand control to the rospy callback loop; all work
    # happens inside MovementNullifier's subscriber/service callbacks.
    rospy.init_node("keep_yaw")
    movement_nullifier = MovementNullifier()
    rospy.spin()
|
flexible
|
{
"blob_id": "c349fa484476e3195e0932e425cbe93d7a7e5394",
"index": 1225,
"step-1": "<mask token>\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n <mask token>\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n <mask token>\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n 
self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n\n def StartListening(self, data):\n self.keep_running = True\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n <mask token>\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = 
tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n\n def StartListening(self, data):\n self.keep_running = True\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Move(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.lin_velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = 
(pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\nif __name__ == '__main__':\n rospy.init_node('keep_yaw')\n movement_nullifier = MovementNullifier()\n rospy.spin()\n",
"step-4": "import rospy\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\nfrom std_srvs.srv import Empty, EmptyResponse\nimport tf\nfrom math import radians, degrees, fabs\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n\n def StartListening(self, data):\n self.keep_running = True\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Move(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.lin_velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if 
rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\nif __name__ == '__main__':\n rospy.init_node('keep_yaw')\n movement_nullifier = MovementNullifier()\n rospy.spin()\n",
"step-5": "#!/usr/bin/env python\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\nfrom std_srvs.srv import Empty, EmptyResponse\nimport tf\nfrom math import radians, degrees, fabs\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber(\"odom\", Odometry, self.OdomCallback)\n rospy.Subscriber(\"cmd_vel\", Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher(\"cmd_vel\", Twist, queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01;\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service(\"stop_nullify\", Empty, self.StopListening)\n self.start_service = rospy.Service(\"start_nullify\", Empty, self.StartListening)\n self.keep_running = True\n \n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n \n def StartListening(self, data):\n self.keep_running = True\n #self.Zero()\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n \n def Turn(self):\n #print \"Turning with velocity: %f\" % (self.velocity)\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n \n def Move(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.lin_velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n \n \n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n \n def TwistCallback(self, data):\n \n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = 
False \n \n def OdomCallback(self, data):\n \n if not self.keep_running:\n return\n \n twist = data.twist\n \n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n \n if not self.cmd_is_commanding: # lets counter react movement\n pose = data.pose\n quaternion = (pose.pose.orientation.x,\n pose.pose.orientation.y,\n pose.pose.orientation.z,\n pose.pose.orientation.w)\n \n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n \n x_position = pose.pose.position.x\n #print \"Yaw: %f deg, Position x: %f\" % (degrees(euler[2]), pose.pose.position.x)\n \n #print \"Turn: %r, Move: %r, First: %r\" % (self.turn, self.move, self.first)\n \n if self.turn:\n self.Turn()\n \n if self.move:\n self.Move()\n \n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp \n self.Zero() \n #print \"Start yaw: %f\" % (self.start_yaw) \n #print \"Start x: %f\" % (self.start_x) \n else: \n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n #print \"Distance %f, prev distance: %f\" % (self.distance, self.prev_distance)\n \n if self.angle >= 0.5: \n self.turn = True\n \n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n \n #print \"Angle: %f\" % self.angle\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n #print \"Yaw: start %f, new %f\" % (self.start_yaw, yaw)\n \n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n #print \"Position: start %f, new %f\" % (self.start_x, x_position)\n \n if self.move and (self.distance > self.prev_distance):\n self.move = False\n self.Zero()\n \n if self.distance >= 0.01:\n self.move = True\n \n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n \n 
self.prev_distance = self.distance\n \n else:\n #print 'Resetting...'\n self.first = True\n self.angle = 0.0\n \n \n \nif __name__ == \"__main__\":\n rospy.init_node(\"keep_yaw\")\n \n movement_nullifier = MovementNullifier()\n \n rospy.spin()",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
class GameSequence:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
<|reserved_special_token_0|>
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameSequence:
<|reserved_special_token_0|>
def __init__(self, ArrayofPlayers):
if len(ArrayofPlayers) < 2:
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK, NOTHING]
self.currentMode = NOTHING
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
<|reserved_special_token_0|>
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameSequence:
<|reserved_special_token_0|>
def __init__(self, ArrayofPlayers):
if len(ArrayofPlayers) < 2:
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK, NOTHING]
self.currentMode = NOTHING
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
def startTurn(self):
self.players[self.currentTurn].changeTurn(True)
"""
maybe some camera change animation to player location
"""
return
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameSequence:
"""
GameSequence summary: Keeps track of player turn sequence and Game end
Functionalities
-start game
-must start turns
-change turns
-end turns
-end game
"""
def __init__(self, ArrayofPlayers):
if len(ArrayofPlayers) < 2:
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK, NOTHING]
self.currentMode = NOTHING
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
def startTurn(self):
self.players[self.currentTurn].changeTurn(True)
"""
maybe some camera change animation to player location
"""
return
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
<|reserved_special_token_1|>
from Player import Player
class GameSequence:
'''
GameSequence summary: Keeps track of player turn sequence and Game end
Functionalities
-start game
-must start turns
-change turns
-end turns
-end game
'''
def __init__(self, ArrayofPlayers):
if (len(ArrayofPlayers) < 2):
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK,NOTHING]
self.currentMode = NOTHING
def changeMode(self,number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
'''
does some intro animaton -> starts game
'''
return
def startTurn(self):
self.players[self.currentTurn].changeTurn(True)
'''
maybe some camera change animation to player location
'''
return
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
|
flexible
|
{
"blob_id": "bdfd941be29a31d6c1bbedd270dadac844f49fc4",
"index": 1198,
"step-1": "<mask token>\n\n\nclass GameSequence:\n <mask token>\n <mask token>\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n <mask token>\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-2": "<mask token>\n\n\nclass GameSequence:\n <mask token>\n\n def __init__(self, ArrayofPlayers):\n if len(ArrayofPlayers) < 2:\n return False\n self.players = ArrayofPlayers\n self.currentTurn = None\n NOTHING = 2\n ATTACK = 1\n MOVE = 0\n self.modes = [MOVE, ATTACK, NOTHING]\n self.currentMode = NOTHING\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n <mask token>\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-3": "<mask token>\n\n\nclass GameSequence:\n <mask token>\n\n def __init__(self, ArrayofPlayers):\n if len(ArrayofPlayers) < 2:\n return False\n self.players = ArrayofPlayers\n self.currentTurn = None\n NOTHING = 2\n ATTACK = 1\n MOVE = 0\n self.modes = [MOVE, ATTACK, NOTHING]\n self.currentMode = NOTHING\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n\n def startTurn(self):\n self.players[self.currentTurn].changeTurn(True)\n \"\"\"\n maybe some camera change animation to player location\n \"\"\"\n return\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-4": "<mask token>\n\n\nclass GameSequence:\n \"\"\"\n GameSequence summary: Keeps track of player turn sequence and Game end\n Functionalities\n -start game\n -must start turns\n -change turns\n -end turns\n -end game\n\n \"\"\"\n\n def __init__(self, ArrayofPlayers):\n if len(ArrayofPlayers) < 2:\n return False\n self.players = ArrayofPlayers\n self.currentTurn = None\n NOTHING = 2\n ATTACK = 1\n MOVE = 0\n self.modes = [MOVE, ATTACK, NOTHING]\n self.currentMode = NOTHING\n\n def changeMode(self, number):\n self.currentMode = self.modes[number]\n\n def startGame(self):\n self.currentTurn = 0\n \"\"\"\n does some intro animaton -> starts game\n\n \"\"\"\n return\n\n def startTurn(self):\n self.players[self.currentTurn].changeTurn(True)\n \"\"\"\n maybe some camera change animation to player location\n \"\"\"\n return\n\n def getCurrentPlayer(self):\n return self.players[self.currentTurn]\n\n def changeTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n self.currentTurn += 1\n self.currentTurn = self.currentTurn % len(self.players)\n\n def endTurn(self):\n self.players[self.currentTurn].changeTurn(False)\n",
"step-5": "from Player import Player\r\n\r\n\r\n\r\n\r\nclass GameSequence:\r\n '''\r\n GameSequence summary: Keeps track of player turn sequence and Game end\r\n Functionalities\r\n -start game\r\n -must start turns\r\n -change turns\r\n -end turns\r\n -end game\r\n\r\n '''\r\n\r\n def __init__(self, ArrayofPlayers):\r\n if (len(ArrayofPlayers) < 2):\r\n return False\r\n\r\n self.players = ArrayofPlayers\r\n self.currentTurn = None\r\n NOTHING = 2\r\n ATTACK = 1\r\n MOVE = 0\r\n\r\n self.modes = [MOVE, ATTACK,NOTHING]\r\n self.currentMode = NOTHING\r\n\r\n def changeMode(self,number):\r\n self.currentMode = self.modes[number]\r\n def startGame(self):\r\n self.currentTurn = 0\r\n '''\r\n does some intro animaton -> starts game\r\n\r\n '''\r\n return\r\n def startTurn(self):\r\n self.players[self.currentTurn].changeTurn(True)\r\n '''\r\n maybe some camera change animation to player location\r\n '''\r\n return\r\n\r\n def getCurrentPlayer(self):\r\n return self.players[self.currentTurn]\r\n\r\n def changeTurn(self):\r\n self.players[self.currentTurn].changeTurn(False)\r\n self.currentTurn += 1\r\n self.currentTurn = self.currentTurn % len(self.players)\r\n\r\n def endTurn(self):\r\n self.players[self.currentTurn].changeTurn(False)\r\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
class TimerTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
child = models.Child.objects.create(first_name='First', last_name=
'Last', birth_date=timezone.localdate())
self.user = User.objects.first()
self.named = models.Timer.objects.create(name='Named', end=timezone
.localtime(), user=self.user, child=child)
self.unnamed = models.Timer.objects.create(end=timezone.localtime(),
user=self.user)
def test_timer_create(self):
self.assertEqual(self.named, models.Timer.objects.get(name='Named'))
self.assertEqual(str(self.named), 'Named')
self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))
self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)
)
def test_timer_title_with_child(self):
self.assertEqual(self.named.title_with_child, str(self.named))
models.Child.objects.create(first_name='Child', last_name='Two',
birth_date=timezone.localdate())
self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(
self.named), str(self.named.child)))
def test_timer_user_username(self):
self.assertEqual(self.named.user_username, self.user.get_username())
self.user.first_name = 'User'
self.user.last_name = 'Name'
self.user.save()
self.assertEqual(self.named.user_username, self.user.get_full_name())
def test_timer_restart(self):
self.named.restart()
self.assertIsNone(self.named.end)
self.assertIsNone(self.named.duration)
self.assertTrue(self.named.active)
<|reserved_special_token_0|>
def test_timer_duration(self):
timer = models.Timer.objects.create(user=User.objects.first())
timer.start = timezone.localtime() - timezone.timedelta(minutes=30)
timer.save()
timer.refresh_from_db()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
timer.stop()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
class TummyTimeTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_tummytime_create(self):
tummy_time = models.TummyTime.objects.create(child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=30),
end=timezone.localtime())
self.assertEqual(tummy_time, models.TummyTime.objects.first())
self.assertEqual(str(tummy_time), 'Tummy Time')
self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SleepTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_sleep_create(self):
sleep = models.Sleep.objects.create(child=self.child, start=
timezone.localtime() - timezone.timedelta(minutes=30), end=
timezone.localtime())
self.assertEqual(sleep, models.Sleep.objects.first())
self.assertEqual(str(sleep), 'Sleep')
self.assertEqual(sleep.duration, sleep.end - sleep.start)
class TemperatureTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
self.temp = models.Temperature.objects.create(child=self.child,
time=timezone.localtime() - timezone.timedelta(days=1),
temperature=98.6)
def test_temperature_create(self):
self.assertEqual(self.temp, models.Temperature.objects.first())
self.assertEqual(str(self.temp), 'Temperature')
self.assertEqual(self.temp.temperature, 98.6)
class TimerTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
child = models.Child.objects.create(first_name='First', last_name=
'Last', birth_date=timezone.localdate())
self.user = User.objects.first()
self.named = models.Timer.objects.create(name='Named', end=timezone
.localtime(), user=self.user, child=child)
self.unnamed = models.Timer.objects.create(end=timezone.localtime(),
user=self.user)
def test_timer_create(self):
self.assertEqual(self.named, models.Timer.objects.get(name='Named'))
self.assertEqual(str(self.named), 'Named')
self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))
self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)
)
def test_timer_title_with_child(self):
self.assertEqual(self.named.title_with_child, str(self.named))
models.Child.objects.create(first_name='Child', last_name='Two',
birth_date=timezone.localdate())
self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(
self.named), str(self.named.child)))
def test_timer_user_username(self):
self.assertEqual(self.named.user_username, self.user.get_username())
self.user.first_name = 'User'
self.user.last_name = 'Name'
self.user.save()
self.assertEqual(self.named.user_username, self.user.get_full_name())
def test_timer_restart(self):
self.named.restart()
self.assertIsNone(self.named.end)
self.assertIsNone(self.named.duration)
self.assertTrue(self.named.active)
def test_timer_stop(self):
stop_time = timezone.localtime()
self.unnamed.stop(end=stop_time)
self.assertEqual(self.unnamed.end, stop_time)
self.assertEqual(self.unnamed.duration.seconds, (self.unnamed.end -
self.unnamed.start).seconds)
self.assertFalse(self.unnamed.active)
def test_timer_duration(self):
timer = models.Timer.objects.create(user=User.objects.first())
timer.start = timezone.localtime() - timezone.timedelta(minutes=30)
timer.save()
timer.refresh_from_db()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
timer.stop()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
class TummyTimeTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_tummytime_create(self):
tummy_time = models.TummyTime.objects.create(child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=30),
end=timezone.localtime())
self.assertEqual(tummy_time, models.TummyTime.objects.first())
self.assertEqual(str(tummy_time), 'Tummy Time')
self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DiaperChangeTestCase(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class FeedingTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_feeding_create(self):
feeding = models.Feeding.objects.create(child=self.child, start=
timezone.localtime() - timezone.timedelta(minutes=30), end=
timezone.localtime(), type='formula', method='bottle', amount=2)
self.assertEqual(feeding, models.Feeding.objects.first())
self.assertEqual(str(feeding), 'Feeding')
self.assertEqual(feeding.duration, feeding.end - feeding.start)
def test_method_both_breasts(self):
feeding = models.Feeding.objects.create(child=self.child, start=
timezone.localtime() - timezone.timedelta(minutes=30), end=
timezone.localtime(), type='breast milk', method='both breasts')
self.assertEqual(feeding, models.Feeding.objects.first())
self.assertEqual(str(feeding), 'Feeding')
self.assertEqual(feeding.method, 'both breasts')
class NoteTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_note_create(self):
note = models.Note.objects.create(child=self.child, note='Note',
time=timezone.localtime())
self.assertEqual(note, models.Note.objects.first())
self.assertEqual(str(note), 'Note')
class SleepTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_sleep_create(self):
sleep = models.Sleep.objects.create(child=self.child, start=
timezone.localtime() - timezone.timedelta(minutes=30), end=
timezone.localtime())
self.assertEqual(sleep, models.Sleep.objects.first())
self.assertEqual(str(sleep), 'Sleep')
self.assertEqual(sleep.duration, sleep.end - sleep.start)
class TemperatureTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
self.temp = models.Temperature.objects.create(child=self.child,
time=timezone.localtime() - timezone.timedelta(days=1),
temperature=98.6)
def test_temperature_create(self):
self.assertEqual(self.temp, models.Temperature.objects.first())
self.assertEqual(str(self.temp), 'Temperature')
self.assertEqual(self.temp.temperature, 98.6)
class TimerTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
child = models.Child.objects.create(first_name='First', last_name=
'Last', birth_date=timezone.localdate())
self.user = User.objects.first()
self.named = models.Timer.objects.create(name='Named', end=timezone
.localtime(), user=self.user, child=child)
self.unnamed = models.Timer.objects.create(end=timezone.localtime(),
user=self.user)
def test_timer_create(self):
self.assertEqual(self.named, models.Timer.objects.get(name='Named'))
self.assertEqual(str(self.named), 'Named')
self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))
self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)
)
def test_timer_title_with_child(self):
self.assertEqual(self.named.title_with_child, str(self.named))
models.Child.objects.create(first_name='Child', last_name='Two',
birth_date=timezone.localdate())
self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(
self.named), str(self.named.child)))
def test_timer_user_username(self):
self.assertEqual(self.named.user_username, self.user.get_username())
self.user.first_name = 'User'
self.user.last_name = 'Name'
self.user.save()
self.assertEqual(self.named.user_username, self.user.get_full_name())
def test_timer_restart(self):
self.named.restart()
self.assertIsNone(self.named.end)
self.assertIsNone(self.named.duration)
self.assertTrue(self.named.active)
def test_timer_stop(self):
stop_time = timezone.localtime()
self.unnamed.stop(end=stop_time)
self.assertEqual(self.unnamed.end, stop_time)
self.assertEqual(self.unnamed.duration.seconds, (self.unnamed.end -
self.unnamed.start).seconds)
self.assertFalse(self.unnamed.active)
def test_timer_duration(self):
timer = models.Timer.objects.create(user=User.objects.first())
timer.start = timezone.localtime() - timezone.timedelta(minutes=30)
timer.save()
timer.refresh_from_db()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
timer.stop()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
class TummyTimeTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_tummytime_create(self):
tummy_time = models.TummyTime.objects.create(child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=30),
end=timezone.localtime())
self.assertEqual(tummy_time, models.TummyTime.objects.first())
self.assertEqual(str(tummy_time), 'Tummy Time')
self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DiaperChangeTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
self.change = models.DiaperChange.objects.create(child=self.child,
time=timezone.localtime() - timezone.timedelta(days=1), wet=1,
solid=1, color='black', amount=1.25)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class FeedingTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_feeding_create(self):
feeding = models.Feeding.objects.create(child=self.child, start=
timezone.localtime() - timezone.timedelta(minutes=30), end=
timezone.localtime(), type='formula', method='bottle', amount=2)
self.assertEqual(feeding, models.Feeding.objects.first())
self.assertEqual(str(feeding), 'Feeding')
self.assertEqual(feeding.duration, feeding.end - feeding.start)
def test_method_both_breasts(self):
feeding = models.Feeding.objects.create(child=self.child, start=
timezone.localtime() - timezone.timedelta(minutes=30), end=
timezone.localtime(), type='breast milk', method='both breasts')
self.assertEqual(feeding, models.Feeding.objects.first())
self.assertEqual(str(feeding), 'Feeding')
self.assertEqual(feeding.method, 'both breasts')
class NoteTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_note_create(self):
note = models.Note.objects.create(child=self.child, note='Note',
time=timezone.localtime())
self.assertEqual(note, models.Note.objects.first())
self.assertEqual(str(note), 'Note')
class SleepTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_sleep_create(self):
sleep = models.Sleep.objects.create(child=self.child, start=
timezone.localtime() - timezone.timedelta(minutes=30), end=
timezone.localtime())
self.assertEqual(sleep, models.Sleep.objects.first())
self.assertEqual(str(sleep), 'Sleep')
self.assertEqual(sleep.duration, sleep.end - sleep.start)
class TemperatureTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
self.temp = models.Temperature.objects.create(child=self.child,
time=timezone.localtime() - timezone.timedelta(days=1),
temperature=98.6)
def test_temperature_create(self):
self.assertEqual(self.temp, models.Temperature.objects.first())
self.assertEqual(str(self.temp), 'Temperature')
self.assertEqual(self.temp.temperature, 98.6)
class TimerTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
child = models.Child.objects.create(first_name='First', last_name=
'Last', birth_date=timezone.localdate())
self.user = User.objects.first()
self.named = models.Timer.objects.create(name='Named', end=timezone
.localtime(), user=self.user, child=child)
self.unnamed = models.Timer.objects.create(end=timezone.localtime(),
user=self.user)
def test_timer_create(self):
self.assertEqual(self.named, models.Timer.objects.get(name='Named'))
self.assertEqual(str(self.named), 'Named')
self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))
self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)
)
def test_timer_title_with_child(self):
self.assertEqual(self.named.title_with_child, str(self.named))
models.Child.objects.create(first_name='Child', last_name='Two',
birth_date=timezone.localdate())
self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(
self.named), str(self.named.child)))
def test_timer_user_username(self):
self.assertEqual(self.named.user_username, self.user.get_username())
self.user.first_name = 'User'
self.user.last_name = 'Name'
self.user.save()
self.assertEqual(self.named.user_username, self.user.get_full_name())
def test_timer_restart(self):
self.named.restart()
self.assertIsNone(self.named.end)
self.assertIsNone(self.named.duration)
self.assertTrue(self.named.active)
def test_timer_stop(self):
stop_time = timezone.localtime()
self.unnamed.stop(end=stop_time)
self.assertEqual(self.unnamed.end, stop_time)
self.assertEqual(self.unnamed.duration.seconds, (self.unnamed.end -
self.unnamed.start).seconds)
self.assertFalse(self.unnamed.active)
def test_timer_duration(self):
timer = models.Timer.objects.create(user=User.objects.first())
timer.start = timezone.localtime() - timezone.timedelta(minutes=30)
timer.save()
timer.refresh_from_db()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
timer.stop()
self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes
=30).seconds)
class TummyTimeTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(first_name='First',
last_name='Last', birth_date=timezone.localdate())
def test_tummytime_create(self):
tummy_time = models.TummyTime.objects.create(child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=30),
end=timezone.localtime())
self.assertEqual(tummy_time, models.TummyTime.objects.first())
self.assertEqual(str(tummy_time), 'Tummy Time')
self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start
)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from core import models
class ChildTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
def test_child_create(self):
child = models.Child.objects.create(
first_name='First',
last_name='Last',
birth_date=timezone.localdate()
)
self.assertEqual(child, models.Child.objects.get(first_name='First'))
self.assertEqual(child.slug, 'first-last')
self.assertEqual(str(child), 'First Last')
self.assertEqual(child.name(), 'First Last')
self.assertEqual(child.name(reverse=True), 'Last, First')
def test_child_count(self):
self.assertEqual(models.Child.count(), 0)
models.Child.objects.create(
first_name='First 1',
last_name='Last 1',
birth_date=timezone.localdate()
)
self.assertEqual(models.Child.count(), 1)
child = models.Child.objects.create(
first_name='First 2',
last_name='Last 2',
birth_date=timezone.localdate()
)
self.assertEqual(models.Child.count(), 2)
child.delete()
self.assertEqual(models.Child.count(), 1)
class DiaperChangeTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(
first_name='First',
last_name='Last',
birth_date=timezone.localdate()
)
self.change = models.DiaperChange.objects.create(
child=self.child,
time=timezone.localtime() - timezone.timedelta(days=1),
wet=1,
solid=1,
color='black',
amount=1.25
)
def test_diaperchange_create(self):
self.assertEqual(self.change, models.DiaperChange.objects.first())
self.assertEqual(str(self.change), 'Diaper Change')
self.assertEqual(self.change.child, self.child)
self.assertTrue(self.change.wet)
self.assertTrue(self.change.solid)
self.assertEqual(self.change.color, 'black')
self.assertEqual(self.change.amount, 1.25)
def test_diaperchange_attributes(self):
self.assertListEqual(
self.change.attributes(), ['Wet', 'Solid', 'Black'])
class FeedingTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(
first_name='First',
last_name='Last',
birth_date=timezone.localdate()
)
def test_feeding_create(self):
feeding = models.Feeding.objects.create(
child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=30),
end=timezone.localtime(),
type='formula',
method='bottle',
amount=2
)
self.assertEqual(feeding, models.Feeding.objects.first())
self.assertEqual(str(feeding), 'Feeding')
self.assertEqual(feeding.duration, feeding.end - feeding.start)
def test_method_both_breasts(self):
feeding = models.Feeding.objects.create(
child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=30),
end=timezone.localtime(),
type='breast milk',
method='both breasts'
)
self.assertEqual(feeding, models.Feeding.objects.first())
self.assertEqual(str(feeding), 'Feeding')
self.assertEqual(feeding.method, 'both breasts')
class NoteTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(
first_name='First',
last_name='Last',
birth_date=timezone.localdate()
)
def test_note_create(self):
note = models.Note.objects.create(
child=self.child, note='Note', time=timezone.localtime())
self.assertEqual(note, models.Note.objects.first())
self.assertEqual(str(note), 'Note')
class SleepTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(
first_name='First',
last_name='Last',
birth_date=timezone.localdate()
)
def test_sleep_create(self):
sleep = models.Sleep.objects.create(
child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=30),
end=timezone.localtime(),
)
self.assertEqual(sleep, models.Sleep.objects.first())
self.assertEqual(str(sleep), 'Sleep')
self.assertEqual(sleep.duration, sleep.end - sleep.start)
class TemperatureTestCase(TestCase):
def setUp(self):
call_command('migrate', verbosity=0)
self.child = models.Child.objects.create(
first_name='First',
last_name='Last',
birth_date=timezone.localdate()
)
self.temp = models.Temperature.objects.create(
child=self.child,
time=timezone.localtime() - timezone.timedelta(days=1),
temperature=98.6
)
def test_temperature_create(self):
self.assertEqual(self.temp, models.Temperature.objects.first())
self.assertEqual(str(self.temp), 'Temperature')
self.assertEqual(self.temp.temperature, 98.6)
class TimerTestCase(TestCase):
    """Tests for the Timer model and its helper properties."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        child = models.Child.objects.create(
            first_name='First', last_name='Last',
            birth_date=timezone.localdate())
        self.user = User.objects.first()
        # One timer with an explicit name and child, one anonymous.
        self.named = models.Timer.objects.create(
            name='Named', end=timezone.localtime(),
            user=self.user, child=child)
        self.unnamed = models.Timer.objects.create(
            end=timezone.localtime(), user=self.user)

    def test_timer_create(self):
        """Named and unnamed timers stringify appropriately."""
        self.assertEqual(self.named, models.Timer.objects.get(name='Named'))
        self.assertEqual(str(self.named), 'Named')
        self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))
        expected = 'Timer #{}'.format(self.unnamed.id)
        self.assertEqual(str(self.unnamed), expected)

    def test_timer_title_with_child(self):
        """The child name appears only once more than one child exists."""
        self.assertEqual(self.named.title_with_child, str(self.named))
        models.Child.objects.create(
            first_name='Child', last_name='Two',
            birth_date=timezone.localdate())
        expected = '{} ({})'.format(str(self.named), str(self.named.child))
        self.assertEqual(self.named.title_with_child, expected)

    def test_timer_user_username(self):
        """The full name is preferred over the username once it is set."""
        self.assertEqual(self.named.user_username, self.user.get_username())
        self.user.first_name = 'User'
        self.user.last_name = 'Name'
        self.user.save()
        self.assertEqual(self.named.user_username, self.user.get_full_name())

    def test_timer_restart(self):
        """Restarting clears the end time and reactivates the timer."""
        self.named.restart()
        self.assertIsNone(self.named.end)
        self.assertIsNone(self.named.duration)
        self.assertTrue(self.named.active)

    def test_timer_stop(self):
        """Stopping records the end time and deactivates the timer."""
        stop_time = timezone.localtime()
        self.unnamed.stop(end=stop_time)
        self.assertEqual(self.unnamed.end, stop_time)
        elapsed = self.unnamed.end - self.unnamed.start
        self.assertEqual(self.unnamed.duration.seconds, elapsed.seconds)
        self.assertFalse(self.unnamed.active)

    def test_timer_duration(self):
        """Duration tracks the start time while running and after stopping."""
        timer = models.Timer.objects.create(user=User.objects.first())
        # Timer.start uses auto_now_add, so it cannot be set in create().
        timer.start = timezone.localtime() - timezone.timedelta(minutes=30)
        timer.save()
        timer.refresh_from_db()
        half_hour = timezone.timedelta(minutes=30)
        self.assertEqual(timer.duration.seconds, half_hour.seconds)
        timer.stop()
        self.assertEqual(timer.duration.seconds, half_hour.seconds)
class TummyTimeTestCase(TestCase):
    """Tests for the TummyTime model."""

    def setUp(self):
        call_command('migrate', verbosity=0)
        self.child = models.Child.objects.create(
            first_name='First', last_name='Last',
            birth_date=timezone.localdate())

    def test_tummytime_create(self):
        """A saved entry round-trips and reports its duration."""
        started = timezone.localtime() - timezone.timedelta(minutes=30)
        tummy_time = models.TummyTime.objects.create(
            child=self.child, start=started, end=timezone.localtime())
        self.assertEqual(tummy_time, models.TummyTime.objects.first())
        self.assertEqual(str(tummy_time), 'Tummy Time')
        self.assertEqual(
            tummy_time.duration, tummy_time.end - tummy_time.start)
|
flexible
|
{
"blob_id": "135401ea495b80fc1d09d6919ccec8640cb328ce",
"index": 3901,
"step-1": "<mask token>\n\n\nclass TimerTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n child = models.Child.objects.create(first_name='First', last_name=\n 'Last', birth_date=timezone.localdate())\n self.user = User.objects.first()\n self.named = models.Timer.objects.create(name='Named', end=timezone\n .localtime(), user=self.user, child=child)\n self.unnamed = models.Timer.objects.create(end=timezone.localtime(),\n user=self.user)\n\n def test_timer_create(self):\n self.assertEqual(self.named, models.Timer.objects.get(name='Named'))\n self.assertEqual(str(self.named), 'Named')\n self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))\n self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)\n )\n\n def test_timer_title_with_child(self):\n self.assertEqual(self.named.title_with_child, str(self.named))\n models.Child.objects.create(first_name='Child', last_name='Two',\n birth_date=timezone.localdate())\n self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(\n self.named), str(self.named.child)))\n\n def test_timer_user_username(self):\n self.assertEqual(self.named.user_username, self.user.get_username())\n self.user.first_name = 'User'\n self.user.last_name = 'Name'\n self.user.save()\n self.assertEqual(self.named.user_username, self.user.get_full_name())\n\n def test_timer_restart(self):\n self.named.restart()\n self.assertIsNone(self.named.end)\n self.assertIsNone(self.named.duration)\n self.assertTrue(self.named.active)\n <mask token>\n\n def test_timer_duration(self):\n timer = models.Timer.objects.create(user=User.objects.first())\n timer.start = timezone.localtime() - timezone.timedelta(minutes=30)\n timer.save()\n timer.refresh_from_db()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n timer.stop()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n\n\nclass TummyTimeTestCase(TestCase):\n\n def setUp(self):\n 
call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_tummytime_create(self):\n tummy_time = models.TummyTime.objects.create(child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime())\n self.assertEqual(tummy_time, models.TummyTime.objects.first())\n self.assertEqual(str(tummy_time), 'Tummy Time')\n self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start\n )\n",
"step-2": "<mask token>\n\n\nclass SleepTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_sleep_create(self):\n sleep = models.Sleep.objects.create(child=self.child, start=\n timezone.localtime() - timezone.timedelta(minutes=30), end=\n timezone.localtime())\n self.assertEqual(sleep, models.Sleep.objects.first())\n self.assertEqual(str(sleep), 'Sleep')\n self.assertEqual(sleep.duration, sleep.end - sleep.start)\n\n\nclass TemperatureTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n self.temp = models.Temperature.objects.create(child=self.child,\n time=timezone.localtime() - timezone.timedelta(days=1),\n temperature=98.6)\n\n def test_temperature_create(self):\n self.assertEqual(self.temp, models.Temperature.objects.first())\n self.assertEqual(str(self.temp), 'Temperature')\n self.assertEqual(self.temp.temperature, 98.6)\n\n\nclass TimerTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n child = models.Child.objects.create(first_name='First', last_name=\n 'Last', birth_date=timezone.localdate())\n self.user = User.objects.first()\n self.named = models.Timer.objects.create(name='Named', end=timezone\n .localtime(), user=self.user, child=child)\n self.unnamed = models.Timer.objects.create(end=timezone.localtime(),\n user=self.user)\n\n def test_timer_create(self):\n self.assertEqual(self.named, models.Timer.objects.get(name='Named'))\n self.assertEqual(str(self.named), 'Named')\n self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))\n self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)\n )\n\n def test_timer_title_with_child(self):\n self.assertEqual(self.named.title_with_child, str(self.named))\n 
models.Child.objects.create(first_name='Child', last_name='Two',\n birth_date=timezone.localdate())\n self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(\n self.named), str(self.named.child)))\n\n def test_timer_user_username(self):\n self.assertEqual(self.named.user_username, self.user.get_username())\n self.user.first_name = 'User'\n self.user.last_name = 'Name'\n self.user.save()\n self.assertEqual(self.named.user_username, self.user.get_full_name())\n\n def test_timer_restart(self):\n self.named.restart()\n self.assertIsNone(self.named.end)\n self.assertIsNone(self.named.duration)\n self.assertTrue(self.named.active)\n\n def test_timer_stop(self):\n stop_time = timezone.localtime()\n self.unnamed.stop(end=stop_time)\n self.assertEqual(self.unnamed.end, stop_time)\n self.assertEqual(self.unnamed.duration.seconds, (self.unnamed.end -\n self.unnamed.start).seconds)\n self.assertFalse(self.unnamed.active)\n\n def test_timer_duration(self):\n timer = models.Timer.objects.create(user=User.objects.first())\n timer.start = timezone.localtime() - timezone.timedelta(minutes=30)\n timer.save()\n timer.refresh_from_db()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n timer.stop()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n\n\nclass TummyTimeTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_tummytime_create(self):\n tummy_time = models.TummyTime.objects.create(child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime())\n self.assertEqual(tummy_time, models.TummyTime.objects.first())\n self.assertEqual(str(tummy_time), 'Tummy Time')\n self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start\n )\n",
"step-3": "<mask token>\n\n\nclass DiaperChangeTestCase(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass FeedingTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_feeding_create(self):\n feeding = models.Feeding.objects.create(child=self.child, start=\n timezone.localtime() - timezone.timedelta(minutes=30), end=\n timezone.localtime(), type='formula', method='bottle', amount=2)\n self.assertEqual(feeding, models.Feeding.objects.first())\n self.assertEqual(str(feeding), 'Feeding')\n self.assertEqual(feeding.duration, feeding.end - feeding.start)\n\n def test_method_both_breasts(self):\n feeding = models.Feeding.objects.create(child=self.child, start=\n timezone.localtime() - timezone.timedelta(minutes=30), end=\n timezone.localtime(), type='breast milk', method='both breasts')\n self.assertEqual(feeding, models.Feeding.objects.first())\n self.assertEqual(str(feeding), 'Feeding')\n self.assertEqual(feeding.method, 'both breasts')\n\n\nclass NoteTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_note_create(self):\n note = models.Note.objects.create(child=self.child, note='Note',\n time=timezone.localtime())\n self.assertEqual(note, models.Note.objects.first())\n self.assertEqual(str(note), 'Note')\n\n\nclass SleepTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_sleep_create(self):\n sleep = models.Sleep.objects.create(child=self.child, start=\n timezone.localtime() - timezone.timedelta(minutes=30), end=\n timezone.localtime())\n self.assertEqual(sleep, 
models.Sleep.objects.first())\n self.assertEqual(str(sleep), 'Sleep')\n self.assertEqual(sleep.duration, sleep.end - sleep.start)\n\n\nclass TemperatureTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n self.temp = models.Temperature.objects.create(child=self.child,\n time=timezone.localtime() - timezone.timedelta(days=1),\n temperature=98.6)\n\n def test_temperature_create(self):\n self.assertEqual(self.temp, models.Temperature.objects.first())\n self.assertEqual(str(self.temp), 'Temperature')\n self.assertEqual(self.temp.temperature, 98.6)\n\n\nclass TimerTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n child = models.Child.objects.create(first_name='First', last_name=\n 'Last', birth_date=timezone.localdate())\n self.user = User.objects.first()\n self.named = models.Timer.objects.create(name='Named', end=timezone\n .localtime(), user=self.user, child=child)\n self.unnamed = models.Timer.objects.create(end=timezone.localtime(),\n user=self.user)\n\n def test_timer_create(self):\n self.assertEqual(self.named, models.Timer.objects.get(name='Named'))\n self.assertEqual(str(self.named), 'Named')\n self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))\n self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)\n )\n\n def test_timer_title_with_child(self):\n self.assertEqual(self.named.title_with_child, str(self.named))\n models.Child.objects.create(first_name='Child', last_name='Two',\n birth_date=timezone.localdate())\n self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(\n self.named), str(self.named.child)))\n\n def test_timer_user_username(self):\n self.assertEqual(self.named.user_username, self.user.get_username())\n self.user.first_name = 'User'\n self.user.last_name = 'Name'\n self.user.save()\n self.assertEqual(self.named.user_username, 
self.user.get_full_name())\n\n def test_timer_restart(self):\n self.named.restart()\n self.assertIsNone(self.named.end)\n self.assertIsNone(self.named.duration)\n self.assertTrue(self.named.active)\n\n def test_timer_stop(self):\n stop_time = timezone.localtime()\n self.unnamed.stop(end=stop_time)\n self.assertEqual(self.unnamed.end, stop_time)\n self.assertEqual(self.unnamed.duration.seconds, (self.unnamed.end -\n self.unnamed.start).seconds)\n self.assertFalse(self.unnamed.active)\n\n def test_timer_duration(self):\n timer = models.Timer.objects.create(user=User.objects.first())\n timer.start = timezone.localtime() - timezone.timedelta(minutes=30)\n timer.save()\n timer.refresh_from_db()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n timer.stop()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n\n\nclass TummyTimeTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_tummytime_create(self):\n tummy_time = models.TummyTime.objects.create(child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime())\n self.assertEqual(tummy_time, models.TummyTime.objects.first())\n self.assertEqual(str(tummy_time), 'Tummy Time')\n self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start\n )\n",
"step-4": "<mask token>\n\n\nclass DiaperChangeTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n self.change = models.DiaperChange.objects.create(child=self.child,\n time=timezone.localtime() - timezone.timedelta(days=1), wet=1,\n solid=1, color='black', amount=1.25)\n <mask token>\n <mask token>\n\n\nclass FeedingTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_feeding_create(self):\n feeding = models.Feeding.objects.create(child=self.child, start=\n timezone.localtime() - timezone.timedelta(minutes=30), end=\n timezone.localtime(), type='formula', method='bottle', amount=2)\n self.assertEqual(feeding, models.Feeding.objects.first())\n self.assertEqual(str(feeding), 'Feeding')\n self.assertEqual(feeding.duration, feeding.end - feeding.start)\n\n def test_method_both_breasts(self):\n feeding = models.Feeding.objects.create(child=self.child, start=\n timezone.localtime() - timezone.timedelta(minutes=30), end=\n timezone.localtime(), type='breast milk', method='both breasts')\n self.assertEqual(feeding, models.Feeding.objects.first())\n self.assertEqual(str(feeding), 'Feeding')\n self.assertEqual(feeding.method, 'both breasts')\n\n\nclass NoteTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_note_create(self):\n note = models.Note.objects.create(child=self.child, note='Note',\n time=timezone.localtime())\n self.assertEqual(note, models.Note.objects.first())\n self.assertEqual(str(note), 'Note')\n\n\nclass SleepTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = 
models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_sleep_create(self):\n sleep = models.Sleep.objects.create(child=self.child, start=\n timezone.localtime() - timezone.timedelta(minutes=30), end=\n timezone.localtime())\n self.assertEqual(sleep, models.Sleep.objects.first())\n self.assertEqual(str(sleep), 'Sleep')\n self.assertEqual(sleep.duration, sleep.end - sleep.start)\n\n\nclass TemperatureTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n self.temp = models.Temperature.objects.create(child=self.child,\n time=timezone.localtime() - timezone.timedelta(days=1),\n temperature=98.6)\n\n def test_temperature_create(self):\n self.assertEqual(self.temp, models.Temperature.objects.first())\n self.assertEqual(str(self.temp), 'Temperature')\n self.assertEqual(self.temp.temperature, 98.6)\n\n\nclass TimerTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n child = models.Child.objects.create(first_name='First', last_name=\n 'Last', birth_date=timezone.localdate())\n self.user = User.objects.first()\n self.named = models.Timer.objects.create(name='Named', end=timezone\n .localtime(), user=self.user, child=child)\n self.unnamed = models.Timer.objects.create(end=timezone.localtime(),\n user=self.user)\n\n def test_timer_create(self):\n self.assertEqual(self.named, models.Timer.objects.get(name='Named'))\n self.assertEqual(str(self.named), 'Named')\n self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))\n self.assertEqual(str(self.unnamed), 'Timer #{}'.format(self.unnamed.id)\n )\n\n def test_timer_title_with_child(self):\n self.assertEqual(self.named.title_with_child, str(self.named))\n models.Child.objects.create(first_name='Child', last_name='Two',\n birth_date=timezone.localdate())\n 
self.assertEqual(self.named.title_with_child, '{} ({})'.format(str(\n self.named), str(self.named.child)))\n\n def test_timer_user_username(self):\n self.assertEqual(self.named.user_username, self.user.get_username())\n self.user.first_name = 'User'\n self.user.last_name = 'Name'\n self.user.save()\n self.assertEqual(self.named.user_username, self.user.get_full_name())\n\n def test_timer_restart(self):\n self.named.restart()\n self.assertIsNone(self.named.end)\n self.assertIsNone(self.named.duration)\n self.assertTrue(self.named.active)\n\n def test_timer_stop(self):\n stop_time = timezone.localtime()\n self.unnamed.stop(end=stop_time)\n self.assertEqual(self.unnamed.end, stop_time)\n self.assertEqual(self.unnamed.duration.seconds, (self.unnamed.end -\n self.unnamed.start).seconds)\n self.assertFalse(self.unnamed.active)\n\n def test_timer_duration(self):\n timer = models.Timer.objects.create(user=User.objects.first())\n timer.start = timezone.localtime() - timezone.timedelta(minutes=30)\n timer.save()\n timer.refresh_from_db()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n timer.stop()\n self.assertEqual(timer.duration.seconds, timezone.timedelta(minutes\n =30).seconds)\n\n\nclass TummyTimeTestCase(TestCase):\n\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(first_name='First',\n last_name='Last', birth_date=timezone.localdate())\n\n def test_tummytime_create(self):\n tummy_time = models.TummyTime.objects.create(child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime())\n self.assertEqual(tummy_time, models.TummyTime.objects.first())\n self.assertEqual(str(tummy_time), 'Tummy Time')\n self.assertEqual(tummy_time.duration, tummy_time.end - tummy_time.start\n )\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.contrib.auth.models import User\nfrom django.core.management import call_command\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom core import models\n\n\nclass ChildTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n\n def test_child_create(self):\n child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n self.assertEqual(child, models.Child.objects.get(first_name='First'))\n self.assertEqual(child.slug, 'first-last')\n self.assertEqual(str(child), 'First Last')\n self.assertEqual(child.name(), 'First Last')\n self.assertEqual(child.name(reverse=True), 'Last, First')\n\n def test_child_count(self):\n self.assertEqual(models.Child.count(), 0)\n models.Child.objects.create(\n first_name='First 1',\n last_name='Last 1',\n birth_date=timezone.localdate()\n )\n self.assertEqual(models.Child.count(), 1)\n child = models.Child.objects.create(\n first_name='First 2',\n last_name='Last 2',\n birth_date=timezone.localdate()\n )\n self.assertEqual(models.Child.count(), 2)\n child.delete()\n self.assertEqual(models.Child.count(), 1)\n\n\nclass DiaperChangeTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n self.change = models.DiaperChange.objects.create(\n child=self.child,\n time=timezone.localtime() - timezone.timedelta(days=1),\n wet=1,\n solid=1,\n color='black',\n amount=1.25\n )\n\n def test_diaperchange_create(self):\n self.assertEqual(self.change, models.DiaperChange.objects.first())\n self.assertEqual(str(self.change), 'Diaper Change')\n self.assertEqual(self.change.child, self.child)\n self.assertTrue(self.change.wet)\n self.assertTrue(self.change.solid)\n self.assertEqual(self.change.color, 'black')\n self.assertEqual(self.change.amount, 1.25)\n\n def 
test_diaperchange_attributes(self):\n self.assertListEqual(\n self.change.attributes(), ['Wet', 'Solid', 'Black'])\n\n\nclass FeedingTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n\n def test_feeding_create(self):\n feeding = models.Feeding.objects.create(\n child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime(),\n type='formula',\n method='bottle',\n amount=2\n )\n self.assertEqual(feeding, models.Feeding.objects.first())\n self.assertEqual(str(feeding), 'Feeding')\n self.assertEqual(feeding.duration, feeding.end - feeding.start)\n\n def test_method_both_breasts(self):\n feeding = models.Feeding.objects.create(\n child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime(),\n type='breast milk',\n method='both breasts'\n )\n self.assertEqual(feeding, models.Feeding.objects.first())\n self.assertEqual(str(feeding), 'Feeding')\n self.assertEqual(feeding.method, 'both breasts')\n\n\nclass NoteTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n\n def test_note_create(self):\n note = models.Note.objects.create(\n child=self.child, note='Note', time=timezone.localtime())\n self.assertEqual(note, models.Note.objects.first())\n self.assertEqual(str(note), 'Note')\n\n\nclass SleepTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n\n def test_sleep_create(self):\n sleep = models.Sleep.objects.create(\n child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime(),\n )\n 
self.assertEqual(sleep, models.Sleep.objects.first())\n self.assertEqual(str(sleep), 'Sleep')\n self.assertEqual(sleep.duration, sleep.end - sleep.start)\n\n\nclass TemperatureTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n self.temp = models.Temperature.objects.create(\n child=self.child,\n time=timezone.localtime() - timezone.timedelta(days=1),\n temperature=98.6\n )\n\n def test_temperature_create(self):\n self.assertEqual(self.temp, models.Temperature.objects.first())\n self.assertEqual(str(self.temp), 'Temperature')\n self.assertEqual(self.temp.temperature, 98.6)\n\n\nclass TimerTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n self.user = User.objects.first()\n self.named = models.Timer.objects.create(\n name='Named',\n end=timezone.localtime(),\n user=self.user,\n child=child\n )\n self.unnamed = models.Timer.objects.create(\n end=timezone.localtime(),\n user=self.user\n )\n\n def test_timer_create(self):\n self.assertEqual(self.named, models.Timer.objects.get(name='Named'))\n self.assertEqual(str(self.named), 'Named')\n self.assertEqual(self.unnamed, models.Timer.objects.get(name=None))\n self.assertEqual(\n str(self.unnamed), 'Timer #{}'.format(self.unnamed.id))\n\n def test_timer_title_with_child(self):\n self.assertEqual(self.named.title_with_child, str(self.named))\n\n models.Child.objects.create(\n first_name='Child',\n last_name='Two',\n birth_date=timezone.localdate()\n )\n self.assertEqual(\n self.named.title_with_child,\n '{} ({})'.format(str(self.named), str(self.named.child))\n )\n\n def test_timer_user_username(self):\n self.assertEqual(self.named.user_username, self.user.get_username())\n self.user.first_name = 'User'\n self.user.last_name = 
'Name'\n self.user.save()\n self.assertEqual(self.named.user_username, self.user.get_full_name())\n\n def test_timer_restart(self):\n self.named.restart()\n self.assertIsNone(self.named.end)\n self.assertIsNone(self.named.duration)\n self.assertTrue(self.named.active)\n\n def test_timer_stop(self):\n stop_time = timezone.localtime()\n self.unnamed.stop(end=stop_time)\n self.assertEqual(self.unnamed.end, stop_time)\n self.assertEqual(\n self.unnamed.duration.seconds,\n (self.unnamed.end - self.unnamed.start).seconds)\n self.assertFalse(self.unnamed.active)\n\n def test_timer_duration(self):\n timer = models.Timer.objects.create(user=User.objects.first())\n # Timer.start uses auto_now_add, so it cannot be set in create().\n timer.start = timezone.localtime() - timezone.timedelta(minutes=30)\n timer.save()\n timer.refresh_from_db()\n\n self.assertEqual(\n timer.duration.seconds,\n timezone.timedelta(minutes=30).seconds)\n timer.stop()\n self.assertEqual(\n timer.duration.seconds,\n timezone.timedelta(minutes=30).seconds)\n\n\nclass TummyTimeTestCase(TestCase):\n def setUp(self):\n call_command('migrate', verbosity=0)\n self.child = models.Child.objects.create(\n first_name='First',\n last_name='Last',\n birth_date=timezone.localdate()\n )\n\n def test_tummytime_create(self):\n tummy_time = models.TummyTime.objects.create(\n child=self.child,\n start=timezone.localtime() - timezone.timedelta(minutes=30),\n end=timezone.localtime(),\n )\n self.assertEqual(tummy_time, models.TummyTime.objects.first())\n self.assertEqual(str(tummy_time), 'Tummy Time')\n self.assertEqual(\n tummy_time.duration, tummy_time.end - tummy_time.start)\n",
"step-ids": [
10,
17,
25,
26,
34
]
}
|
[
10,
17,
25,
26,
34
] |
<|reserved_special_token_0|>
@router.get('/', status_code=status.HTTP_200_OK, response_model=List[
schemas.ShowBlog])
def all_blog(db: Session=Depends(database.get_db), current_user: schemas.
User=Depends(get_current_user)):
return blog.all_blog(db)
@router.post('/', status_code=status.HTTP_201_CREATED)
def create(request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.create(request, db)
@router.delete('/{id}', status_code=status.HTTP_200_OK)
def destroy(id, db: Session=Depends(database.get_db), current_user: schemas
.User=Depends(get_current_user)):
return blog.destroy(id, db)
@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)
def update(id, request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.update(id, request, db)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@router.get('/', status_code=status.HTTP_200_OK, response_model=List[
schemas.ShowBlog])
def all_blog(db: Session=Depends(database.get_db), current_user: schemas.
User=Depends(get_current_user)):
return blog.all_blog(db)
@router.post('/', status_code=status.HTTP_201_CREATED)
def create(request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.create(request, db)
@router.delete('/{id}', status_code=status.HTTP_200_OK)
def destroy(id, db: Session=Depends(database.get_db), current_user: schemas
.User=Depends(get_current_user)):
return blog.destroy(id, db)
@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)
def update(id, request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.update(id, request, db)
@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas
.ShowBlog)
def show(id, response: Response, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.show(id, response, db)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
router = APIRouter(prefix='/blog', tags=['Blog'])
@router.get('/', status_code=status.HTTP_200_OK, response_model=List[
schemas.ShowBlog])
def all_blog(db: Session=Depends(database.get_db), current_user: schemas.
User=Depends(get_current_user)):
return blog.all_blog(db)
@router.post('/', status_code=status.HTTP_201_CREATED)
def create(request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.create(request, db)
@router.delete('/{id}', status_code=status.HTTP_200_OK)
def destroy(id, db: Session=Depends(database.get_db), current_user: schemas
.User=Depends(get_current_user)):
return blog.destroy(id, db)
@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)
def update(id, request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.update(id, request, db)
@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas
.ShowBlog)
def show(id, response: Response, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.show(id, response, db)
<|reserved_special_token_1|>
from fastapi import APIRouter, Depends, status, Response
from typing import List
import schemas, database
from sqlalchemy.orm import Session
import repository.blog as blog
from .oauth2 import get_current_user
router = APIRouter(prefix='/blog', tags=['Blog'])
@router.get('/', status_code=status.HTTP_200_OK, response_model=List[
schemas.ShowBlog])
def all_blog(db: Session=Depends(database.get_db), current_user: schemas.
User=Depends(get_current_user)):
return blog.all_blog(db)
@router.post('/', status_code=status.HTTP_201_CREATED)
def create(request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.create(request, db)
@router.delete('/{id}', status_code=status.HTTP_200_OK)
def destroy(id, db: Session=Depends(database.get_db), current_user: schemas
.User=Depends(get_current_user)):
return blog.destroy(id, db)
@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)
def update(id, request: schemas.Blog, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.update(id, request, db)
@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas
.ShowBlog)
def show(id, response: Response, db: Session=Depends(database.get_db),
current_user: schemas.User=Depends(get_current_user)):
return blog.show(id, response, db)
<|reserved_special_token_1|>
from fastapi import APIRouter, Depends, status, Response
from typing import List
import schemas, database
from sqlalchemy.orm import Session
import repository.blog as blog
from .oauth2 import get_current_user
# Router for all /blog endpoints; every route requires an authenticated user.
router = APIRouter(prefix="/blog", tags=['Blog'])
@router.get('/', status_code=status.HTTP_200_OK,
            response_model=List[schemas.ShowBlog])
def all_blog(db: Session = Depends(database.get_db),
             current_user: schemas.User = Depends(get_current_user)):
    """Return every blog entry for an authenticated user."""
    return blog.all_blog(db)
@router.post('/', status_code=status.HTTP_201_CREATED)
def create(request:schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    """Create a blog from the request body (auth required)."""
    return blog.create(request, db)
@router.delete('/{id}', status_code=status.HTTP_200_OK)
def destroy(id, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    """Delete the blog with the given path id (auth required)."""
    return blog.destroy(id, db)
@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)
def update(id, request: schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    """Replace the blog identified by id with the request body (auth required)."""
    return blog.update(id, request, db)
@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.ShowBlog)
def show(id, response: Response, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    """Return one blog by path id (auth required); delegates to blog.show."""
    return blog.show(id, response, db)
|
flexible
|
{
"blob_id": "7fd5e83d28e919e7b94cea290c6b4db3378938b6",
"index": 4600,
"step-1": "<mask token>\n\n\n@router.get('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\n@router.post('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\n@router.delete('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\n@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@router.get('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\n@router.post('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\n@router.delete('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\n@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\n@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas\n .ShowBlog)\ndef show(id, response: Response, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)\n",
"step-3": "<mask token>\nrouter = APIRouter(prefix='/blog', tags=['Blog'])\n\n\n@router.get('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\n@router.post('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\n@router.delete('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\n@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\n@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas\n .ShowBlog)\ndef show(id, response: Response, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)\n",
"step-4": "from fastapi import APIRouter, Depends, status, Response\nfrom typing import List\nimport schemas, database\nfrom sqlalchemy.orm import Session\nimport repository.blog as blog\nfrom .oauth2 import get_current_user\nrouter = APIRouter(prefix='/blog', tags=['Blog'])\n\n\n@router.get('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\n@router.post('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\n@router.delete('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\n@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\n@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas\n .ShowBlog)\ndef show(id, response: Response, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)\n",
"step-5": "from fastapi import APIRouter, Depends, status, Response\nfrom typing import List\nimport schemas, database\nfrom sqlalchemy.orm import Session\nimport repository.blog as blog\nfrom .oauth2 import get_current_user\n\nrouter = APIRouter(\n prefix=\"/blog\",\n tags=['Blog'])\n\n@router.get('/', status_code=status.HTTP_200_OK, response_model=List[schemas.ShowBlog])\ndef all_blog(db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n@router.post('/', status_code=status.HTTP_201_CREATED)\ndef create(request:schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n@router.delete('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.ShowBlog)\ndef show(id, response: Response, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test(input, output, duration):
    """Return True iff twoSensorAvg(input, duration) matches `output` exactly."""
    results = twoSensorAvg(input, duration)
    print(results)
    # Two lists compare equal exactly when they have the same length and
    # pairwise-equal elements, which is what the original loop checked.
    return results == output
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def twoSensorAvg(input_data, duration=1):
    """Average readings from all sensors over buckets of `duration` seconds.

    Each record is a string 'sensor_id,timestamp_ms,value'. Readings whose
    timestamps fall into the same bucket are averaged together regardless
    of sensor. Returns 'start-end: avg' strings in first-seen bucket order.
    """
    buckets = {}  # bucket index -> [value_sum, sample_count]
    for record in input_data:
        fields = record.split(',')
        bucket = int(int(fields[1]) / (duration * 1000))
        total, count = buckets.get(bucket, (0, 0))
        buckets[bucket] = [total + int(fields[2]), count + 1]
    labelled = []
    for bucket, (total, count) in buckets.items():
        start = bucket * duration * 1000
        end = start + 1000 * (duration - 1) + 999
        avg = round(float(total / count), 2)
        labelled.append(str(start) + '-' + str(end) + ': ' + str(avg))
    return labelled
def test(input, output, duration):
    """Return True iff twoSensorAvg(input, duration) matches `output` exactly."""
    results = twoSensorAvg(input, duration)
    print(results)
    # Two lists compare equal exactly when they have the same length and
    # pairwise-equal elements, which is what the original loop checked.
    return results == output
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def twoSensorAvg(input_data, duration=1):
    """Average readings from all sensors over buckets of `duration` seconds.

    Each record is a string 'sensor_id,timestamp_ms,value'. Readings whose
    timestamps fall into the same bucket are averaged together regardless
    of sensor. Returns 'start-end: avg' strings in first-seen bucket order.
    """
    buckets = {}  # bucket index -> [value_sum, sample_count]
    for record in input_data:
        fields = record.split(',')
        bucket = int(int(fields[1]) / (duration * 1000))
        total, count = buckets.get(bucket, (0, 0))
        buckets[bucket] = [total + int(fields[2]), count + 1]
    labelled = []
    for bucket, (total, count) in buckets.items():
        start = bucket * duration * 1000
        end = start + 1000 * (duration - 1) + 999
        avg = round(float(total / count), 2)
        labelled.append(str(start) + '-' + str(end) + ': ' + str(avg))
    return labelled
def test(input, output, duration):
    """Return True iff twoSensorAvg(input, duration) matches `output` exactly."""
    results = twoSensorAvg(input, duration)
    print(results)
    # Two lists compare equal exactly when they have the same length and
    # pairwise-equal elements, which is what the original loop checked.
    return results == output
# Manual smoke test: run the module directly to compare twoSensorAvg
# against a known-good expected output (two sensors, 1-second buckets);
# prints True when the results match.
if __name__ == '__main__':
    input_data = ['1,10000,40', '1,10002,45', '1,11015,50', '2,10005,42',
        '2,11051,45', '2,12064,42', '2,13161,42']
    ans = ['10000-10999: 42.33', '11000-11999: 47.5', '12000-12999: 42.0',
        '13000-13999: 42.0']
    print(test(input_data, ans, 1))
|
flexible
|
{
"blob_id": "836d712c811079f190eae9c2780131a844c9dddf",
"index": 3044,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test(input, output, duration):\n results = twoSensorAvg(input, duration)\n print(results)\n if len(results) != len(output):\n return False\n for i in range(len(output)):\n if results[i] != output[i]:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "def twoSensorAvg(input_data, duration=1):\n times = {}\n for i in input_data:\n data = i.split(',')\n time = int(int(data[1]) / (duration * 1000))\n if time not in times:\n times[time] = [0, 0]\n times[time][0] += int(data[2])\n times[time][1] += 1\n ans = []\n for i, v in times.items():\n i = int(i)\n a = str(i * duration * 1000) + '-' + str(i * duration * 1000 + 1000 *\n (duration - 1) + 999) + ': ' + str(round(float(v[0] / v[1]), 2))\n ans.append(a)\n return ans\n\n\ndef test(input, output, duration):\n results = twoSensorAvg(input, duration)\n print(results)\n if len(results) != len(output):\n return False\n for i in range(len(output)):\n if results[i] != output[i]:\n return False\n return True\n\n\n<mask token>\n",
"step-4": "def twoSensorAvg(input_data, duration=1):\n times = {}\n for i in input_data:\n data = i.split(',')\n time = int(int(data[1]) / (duration * 1000))\n if time not in times:\n times[time] = [0, 0]\n times[time][0] += int(data[2])\n times[time][1] += 1\n ans = []\n for i, v in times.items():\n i = int(i)\n a = str(i * duration * 1000) + '-' + str(i * duration * 1000 + 1000 *\n (duration - 1) + 999) + ': ' + str(round(float(v[0] / v[1]), 2))\n ans.append(a)\n return ans\n\n\ndef test(input, output, duration):\n results = twoSensorAvg(input, duration)\n print(results)\n if len(results) != len(output):\n return False\n for i in range(len(output)):\n if results[i] != output[i]:\n return False\n return True\n\n\nif __name__ == '__main__':\n input_data = ['1,10000,40', '1,10002,45', '1,11015,50', '2,10005,42',\n '2,11051,45', '2,12064,42', '2,13161,42']\n ans = ['10000-10999: 42.33', '11000-11999: 47.5', '12000-12999: 42.0',\n '13000-13999: 42.0']\n print(test(input_data, ans, 1))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Interactive top-3 scoreboard: each input line is "<score> <name>"; after
# every entry the three highest scores seen so far are printed.
highscores = []
scores = []
while True:
    entry = input('> ').split(' ')
    scores.append([int(entry[0]), entry[1]])
    # Highest score first; list comparison falls back to the name on ties.
    scores.sort(reverse=True)
    # Slicing a list shorter than 3 just yields the whole list, so this
    # single expression covers both branches of the original if/else.
    highscores = scores[:3]
    print(highscores)
|
normal
|
{
"blob_id": "54e5feee3c8bb35c351361fd3ed4b5e237e5973d",
"index": 6701,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n user = input('> ').split(' ')\n score = int(user[0])\n name = user[1]\n scores.append([score, name])\n scores.sort(reverse=True)\n if len(scores) < 3:\n highscores = scores\n else:\n highscores = scores[:3]\n print(highscores)\n",
"step-3": "highscores = []\nscores = []\nwhile True:\n user = input('> ').split(' ')\n score = int(user[0])\n name = user[1]\n scores.append([score, name])\n scores.sort(reverse=True)\n if len(scores) < 3:\n highscores = scores\n else:\n highscores = scores[:3]\n print(highscores)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Operator precedence
# As in mathematics, Python operators have precedence: e.g. multiplication
# and division are evaluated before addition and subtraction.
# Precedence can be looked up in the operator-precedence table; the lower
# an operator appears in the table, the higher its precedence, and
# higher-precedence operators are evaluated first.
# Operators of equal precedence are evaluated left to right.
# Don't try to memorise the table — just know that it exists.
# When the precedence is unclear, use parentheses to make the evaluation
# order explicit.
a = 1 + 2 * 3
# Which binds tighter, `and` or `or`?
# If `or` bound tighter (or the two were equal), the `or` would be
# evaluated first and the result would be 3.
# Since `and` actually binds tighter, the `and` is evaluated first
# and the result is 1.
a = 1 or 2 and 3
# print(a)
# Logical operators (supplement)
# Comparison operators can be chained:
result = 1 < 2 < 3  # equivalent to 1 < 2 and 2 < 3
result = 10 < 20 > 15
print(result)
|
normal
|
{
"blob_id": "25550cbaf6e0e5bdbbe3852bb8cdc05ac300d315",
"index": 8872,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(result)\n",
"step-3": "a = 1 + 2 * 3\na = 1 or 2 and 3\nresult = 1 < 2 < 3\nresult = 10 < 20 > 15\nprint(result)\n",
"step-4": "# 运算符的优先级\n# 和数学中一样,在Python运算也有优先级,比如先乘除 后加减\n# 运算符的优先级可以根据优先级的表格来查询,\n# 在表格中位置越靠下的运算符优先级越高,优先级越高的越优先计算\n# 如果优先级一样则自左向右计算\n# 关于优先级的表格,你知道有这么一个东西就够了,千万不要去记\n# 在开发中如果遇到优先级不清楚的,则可以通过小括号来改变运算顺序\na = 1 + 2 * 3\n\n# 一样 and高 or高\n# 如果or的优先级高,或者两个运算符的优先级一样高\n# 则需要先进行或运算,则运算结果是3\n# 如果and的优先级高,则应该先计算与运算\n# 则运算结果是1\na = 1 or 2 and 3\n\n# print(a)\n\n# 逻辑运算符(补充)\n# 逻辑运算符可以连着使用\nresult = 1 < 2 < 3 # 相当于 1 < 2 and 2 < 3\nresult = 10 < 20 > 15\n\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Submodule(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Submodule(nn.Module):
    def __init__(self, layer_sizes: typing.List[int], activation_name: str,
        use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,
        device: str='cuda:0', seed: int=0):
        """Build an MLP with optional dropout/batch-norm and skip path.

        layer_sizes: widths of every layer, input first and output last,
            so len(layer_sizes) - 1 Linear layers are created.
        activation_name: resolved to an activation class via get_activation().
        use_batch_norm: insert BatchNorm1d before each non-first Linear.
        use_skip: add an input->output residual path; a Linear projection
            is used when input and output widths differ.
        dropout: dropout probability before each non-first Linear (0 disables).
        device: device the parameters are moved to.
        seed: seeds construction so weight initialisation is reproducible.
        """
        super().__init__()
        # Seeded RNG context makes parameter initialisation deterministic.
        with PyTorchRandomStateContext(seed):
            n_layers = len(layer_sizes) - 1
            activation = get_activation(activation_name)
            layers = []
            for i in range(n_layers):
                # The first Linear receives the raw input; every later one
                # is preceded by activation (+ optional dropout/batch norm).
                if i > 0:
                    layers.append(activation())
                    if dropout > 0.0:
                        layers.append(nn.Dropout(p=dropout))
                    if use_batch_norm:
                        layers.append(BatchNorm1d(layer_sizes[i]))
                layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            self.net = nn.Sequential(*layers)
            self.net.to(device=device)
            if use_skip:
                if layer_sizes[0] == layer_sizes[-1]:
                    # Identity skip when the widths already match.
                    self.skip = nn.Sequential()
                else:
                    self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])
                self.skip.to(device=device)
            else:
                self.skip = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Submodule(nn.Module):
    """Feed-forward MLP block with optional batch norm, dropout, and an
    optional (possibly linearly projected) skip connection."""

    def __init__(self, layer_sizes: typing.List[int], activation_name: str,
        use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,
        device: str='cuda:0', seed: int=0):
        """Build the network.

        layer_sizes: widths of every layer, input first and output last,
            so len(layer_sizes) - 1 Linear layers are created.
        activation_name: resolved to an activation class via get_activation().
        use_batch_norm: insert BatchNorm1d before each non-first Linear.
        use_skip: add an input->output residual path; a Linear projection
            is used when input and output widths differ.
        dropout: dropout probability before each non-first Linear (0 disables).
        device: device the parameters are moved to.
        seed: seeds construction so weight initialisation is reproducible.
        """
        super().__init__()
        # Seeded RNG context makes parameter initialisation deterministic.
        with PyTorchRandomStateContext(seed):
            n_layers = len(layer_sizes) - 1
            activation = get_activation(activation_name)
            layers = []
            for i in range(n_layers):
                # The first Linear receives the raw input; every later one
                # is preceded by activation (+ optional dropout/batch norm).
                if i > 0:
                    layers.append(activation())
                    if dropout > 0.0:
                        layers.append(nn.Dropout(p=dropout))
                    if use_batch_norm:
                        layers.append(BatchNorm1d(layer_sizes[i]))
                layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            self.net = nn.Sequential(*layers)
            self.net.to(device=device)
            if use_skip:
                if layer_sizes[0] == layer_sizes[-1]:
                    # Identity skip when the widths already match.
                    self.skip = nn.Sequential()
                else:
                    self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])
                self.skip.to(device=device)
            else:
                self.skip = None

    def forward(self, x):
        """Apply the MLP; add the skip path's output when enabled."""
        if self.skip is None:
            return self.net(x)
        else:
            return self.net(x) + self.skip(x)
<|reserved_special_token_1|>
import typing
import torch.nn as nn
from .torch_utils import get_activation, BatchNorm1d
from dna.models.torch_modules.torch_utils import PyTorchRandomStateContext
class Submodule(nn.Module):
    """Feed-forward MLP block with optional batch norm, dropout, and an
    optional (possibly linearly projected) skip connection."""

    def __init__(self, layer_sizes: typing.List[int], activation_name: str,
        use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,
        device: str='cuda:0', seed: int=0):
        """Build the network.

        layer_sizes: widths of every layer, input first and output last,
            so len(layer_sizes) - 1 Linear layers are created.
        activation_name: resolved to an activation class via get_activation().
        use_batch_norm: insert BatchNorm1d before each non-first Linear.
        use_skip: add an input->output residual path; a Linear projection
            is used when input and output widths differ.
        dropout: dropout probability before each non-first Linear (0 disables).
        device: device the parameters are moved to.
        seed: seeds construction so weight initialisation is reproducible.
        """
        super().__init__()
        # Seeded RNG context makes parameter initialisation deterministic.
        with PyTorchRandomStateContext(seed):
            n_layers = len(layer_sizes) - 1
            activation = get_activation(activation_name)
            layers = []
            for i in range(n_layers):
                # The first Linear receives the raw input; every later one
                # is preceded by activation (+ optional dropout/batch norm).
                if i > 0:
                    layers.append(activation())
                    if dropout > 0.0:
                        layers.append(nn.Dropout(p=dropout))
                    if use_batch_norm:
                        layers.append(BatchNorm1d(layer_sizes[i]))
                layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            self.net = nn.Sequential(*layers)
            self.net.to(device=device)
            if use_skip:
                if layer_sizes[0] == layer_sizes[-1]:
                    # Identity skip when the widths already match.
                    self.skip = nn.Sequential()
                else:
                    self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])
                self.skip.to(device=device)
            else:
                self.skip = None

    def forward(self, x):
        """Apply the MLP; add the skip path's output when enabled."""
        if self.skip is None:
            return self.net(x)
        else:
            return self.net(x) + self.skip(x)
|
flexible
|
{
"blob_id": "950b2906853c37cdeaa8ed1076fff79dbe99b6f8",
"index": 8327,
"step-1": "<mask token>\n\n\nclass Submodule(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Submodule(nn.Module):\n\n def __init__(self, layer_sizes: typing.List[int], activation_name: str,\n use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,\n device: str='cuda:0', seed: int=0):\n super().__init__()\n with PyTorchRandomStateContext(seed):\n n_layers = len(layer_sizes) - 1\n activation = get_activation(activation_name)\n layers = []\n for i in range(n_layers):\n if i > 0:\n layers.append(activation())\n if dropout > 0.0:\n layers.append(nn.Dropout(p=dropout))\n if use_batch_norm:\n layers.append(BatchNorm1d(layer_sizes[i]))\n layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))\n self.net = nn.Sequential(*layers)\n self.net.to(device=device)\n if use_skip:\n if layer_sizes[0] == layer_sizes[-1]:\n self.skip = nn.Sequential()\n else:\n self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])\n self.skip.to(device=device)\n else:\n self.skip = None\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Submodule(nn.Module):\n\n def __init__(self, layer_sizes: typing.List[int], activation_name: str,\n use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,\n device: str='cuda:0', seed: int=0):\n super().__init__()\n with PyTorchRandomStateContext(seed):\n n_layers = len(layer_sizes) - 1\n activation = get_activation(activation_name)\n layers = []\n for i in range(n_layers):\n if i > 0:\n layers.append(activation())\n if dropout > 0.0:\n layers.append(nn.Dropout(p=dropout))\n if use_batch_norm:\n layers.append(BatchNorm1d(layer_sizes[i]))\n layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))\n self.net = nn.Sequential(*layers)\n self.net.to(device=device)\n if use_skip:\n if layer_sizes[0] == layer_sizes[-1]:\n self.skip = nn.Sequential()\n else:\n self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])\n self.skip.to(device=device)\n else:\n self.skip = None\n\n def forward(self, x):\n if self.skip is None:\n return self.net(x)\n else:\n return self.net(x) + self.skip(x)\n",
"step-4": "import typing\nimport torch.nn as nn\nfrom .torch_utils import get_activation, BatchNorm1d\nfrom dna.models.torch_modules.torch_utils import PyTorchRandomStateContext\n\n\nclass Submodule(nn.Module):\n\n def __init__(self, layer_sizes: typing.List[int], activation_name: str,\n use_batch_norm: bool, use_skip: bool=False, dropout: float=0.0, *,\n device: str='cuda:0', seed: int=0):\n super().__init__()\n with PyTorchRandomStateContext(seed):\n n_layers = len(layer_sizes) - 1\n activation = get_activation(activation_name)\n layers = []\n for i in range(n_layers):\n if i > 0:\n layers.append(activation())\n if dropout > 0.0:\n layers.append(nn.Dropout(p=dropout))\n if use_batch_norm:\n layers.append(BatchNorm1d(layer_sizes[i]))\n layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))\n self.net = nn.Sequential(*layers)\n self.net.to(device=device)\n if use_skip:\n if layer_sizes[0] == layer_sizes[-1]:\n self.skip = nn.Sequential()\n else:\n self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])\n self.skip.to(device=device)\n else:\n self.skip = None\n\n def forward(self, x):\n if self.skip is None:\n return self.net(x)\n else:\n return self.net(x) + self.skip(x)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def aboutme(request):
    """Respond with a link to the externally hosted About page."""
    return HttpResponse(
        " <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
<|reserved_special_token_0|>
def analyze(request):
    """Apply the user-selected text transformations and render the result.

    Reads the text and the checkbox flags ('on'/'off') from request.POST.
    Enabled transformations are applied in a fixed order, each feeding its
    output into the next; the last one applied determines the 'purpose'
    label shown on the analyze.html page.
    """
    djtext = request.POST.get('text', 'default')
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')
    # Bail out first if nothing was selected; this also guarantees 'dics'
    # is always bound before the final render().
    if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
            extraspaceremover != 'on' and charcount != 'on'):
        return HttpResponse('Please Select Any Function And Try Again!')
    if removepunc == 'on':
        punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
        # str.translate drops every punctuation character in one pass.
        analyzed = djtext.translate(str.maketrans('', '', punctuations))
        dics = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed
    if fullcaps == 'on':
        analyzed = djtext.upper()
        dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
        djtext = analyzed
    if newlineremover == 'on':
        analyzed = djtext.replace('\n', '').replace('\r', '')
        dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
        djtext = analyzed
    if extraspaceremover == 'on':
        # BUGFIX: the original compared characters to the empty string '',
        # which can never match a single character, so no space was ever
        # removed. Collapse runs of spaces by skipping a space that is
        # followed by another, guarding the index + 1 lookahead.
        analyzed = ''
        for index, char in enumerate(djtext):
            if not (char == ' ' and index + 1 < len(djtext) and
                    djtext[index + 1] == ' '):
                analyzed = analyzed + char
        dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
            analyzed}
        djtext = analyzed
    if charcount == 'on':
        # The original loop overwrote the same len() result once per
        # character; a single assignment is equivalent and O(1).
        analyzed = len(djtext)
        dics = {'purpose': 'Total no. of Character in your text are',
            'analyzed_text': analyzed}
    return render(request, 'analyze.html', dics)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
    """Render the home page."""
    return render(request, 'index.html')
def aboutme(request):
    """Respond with a link to the externally hosted About page."""
    return HttpResponse(
        " <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
<|reserved_special_token_0|>
def analyze(request):
    """Apply the user-selected text transformations and render the result.

    Reads the text and the checkbox flags ('on'/'off') from request.POST.
    Enabled transformations are applied in a fixed order, each feeding its
    output into the next; the last one applied determines the 'purpose'
    label shown on the analyze.html page.
    """
    djtext = request.POST.get('text', 'default')
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')
    # Bail out first if nothing was selected; this also guarantees 'dics'
    # is always bound before the final render().
    if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
            extraspaceremover != 'on' and charcount != 'on'):
        return HttpResponse('Please Select Any Function And Try Again!')
    if removepunc == 'on':
        punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
        # str.translate drops every punctuation character in one pass.
        analyzed = djtext.translate(str.maketrans('', '', punctuations))
        dics = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed
    if fullcaps == 'on':
        analyzed = djtext.upper()
        dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
        djtext = analyzed
    if newlineremover == 'on':
        analyzed = djtext.replace('\n', '').replace('\r', '')
        dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
        djtext = analyzed
    if extraspaceremover == 'on':
        # BUGFIX: the original compared characters to the empty string '',
        # which can never match a single character, so no space was ever
        # removed. Collapse runs of spaces by skipping a space that is
        # followed by another, guarding the index + 1 lookahead.
        analyzed = ''
        for index, char in enumerate(djtext):
            if not (char == ' ' and index + 1 < len(djtext) and
                    djtext[index + 1] == ' '):
                analyzed = analyzed + char
        dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
            analyzed}
        djtext = analyzed
    if charcount == 'on':
        # The original loop overwrote the same len() result once per
        # character; a single assignment is equivalent and O(1).
        analyzed = len(djtext)
        dics = {'purpose': 'Total no. of Character in your text are',
            'analyzed_text': analyzed}
    return render(request, 'analyze.html', dics)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
    """Render the home page."""
    return render(request, 'index.html')
def aboutme(request):
    """Respond with a link to the externally hosted About page."""
    return HttpResponse(
        " <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
def contact(request):
    """Respond with a link to the externally hosted Contact page."""
    return HttpResponse(
        "<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>"
        )
def analyze(request):
    """Apply the user-selected text transformations and render the result.

    Reads the text and the checkbox flags ('on'/'off') from request.POST.
    Enabled transformations are applied in a fixed order, each feeding its
    output into the next; the last one applied determines the 'purpose'
    label shown on the analyze.html page.
    """
    djtext = request.POST.get('text', 'default')
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')
    # Bail out first if nothing was selected; this also guarantees 'dics'
    # is always bound before the final render().
    if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
            extraspaceremover != 'on' and charcount != 'on'):
        return HttpResponse('Please Select Any Function And Try Again!')
    if removepunc == 'on':
        punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
        # str.translate drops every punctuation character in one pass.
        analyzed = djtext.translate(str.maketrans('', '', punctuations))
        dics = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed
    if fullcaps == 'on':
        analyzed = djtext.upper()
        dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
        djtext = analyzed
    if newlineremover == 'on':
        analyzed = djtext.replace('\n', '').replace('\r', '')
        dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
        djtext = analyzed
    if extraspaceremover == 'on':
        # BUGFIX: the original compared characters to the empty string '',
        # which can never match a single character, so no space was ever
        # removed. Collapse runs of spaces by skipping a space that is
        # followed by another, guarding the index + 1 lookahead.
        analyzed = ''
        for index, char in enumerate(djtext):
            if not (char == ' ' and index + 1 < len(djtext) and
                    djtext[index + 1] == ' '):
                analyzed = analyzed + char
        dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
            analyzed}
        djtext = analyzed
    if charcount == 'on':
        # The original loop overwrote the same len() result once per
        # character; a single assignment is equivalent and O(1).
        analyzed = len(djtext)
        dics = {'purpose': 'Total no. of Character in your text are',
            'analyzed_text': analyzed}
    return render(request, 'analyze.html', dics)
<|reserved_special_token_1|>
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the home page."""
    return render(request, 'index.html')
def aboutme(request):
    """Respond with a link to the externally hosted About page."""
    return HttpResponse(
        " <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
def contact(request):
    """Respond with a link to the externally hosted Contact page."""
    return HttpResponse(
        "<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>"
        )
def analyze(request):
    """Apply the user-selected text transformations and render the result.

    Reads the text and the checkbox flags ('on'/'off') from request.POST.
    Enabled transformations are applied in a fixed order, each feeding its
    output into the next; the last one applied determines the 'purpose'
    label shown on the analyze.html page.
    """
    djtext = request.POST.get('text', 'default')
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')
    # Bail out first if nothing was selected; this also guarantees 'dics'
    # is always bound before the final render().
    if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
            extraspaceremover != 'on' and charcount != 'on'):
        return HttpResponse('Please Select Any Function And Try Again!')
    if removepunc == 'on':
        punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
        # str.translate drops every punctuation character in one pass.
        analyzed = djtext.translate(str.maketrans('', '', punctuations))
        dics = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed
    if fullcaps == 'on':
        analyzed = djtext.upper()
        dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
        djtext = analyzed
    if newlineremover == 'on':
        analyzed = djtext.replace('\n', '').replace('\r', '')
        dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
        djtext = analyzed
    if extraspaceremover == 'on':
        # BUGFIX: the original compared characters to the empty string '',
        # which can never match a single character, so no space was ever
        # removed. Collapse runs of spaces by skipping a space that is
        # followed by another, guarding the index + 1 lookahead.
        analyzed = ''
        for index, char in enumerate(djtext):
            if not (char == ' ' and index + 1 < len(djtext) and
                    djtext[index + 1] == ' '):
                analyzed = analyzed + char
        dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
            analyzed}
        djtext = analyzed
    if charcount == 'on':
        # The original loop overwrote the same len() result once per
        # character; a single assignment is equivalent and O(1).
        analyzed = len(djtext)
        dics = {'purpose': 'Total no. of Character in your text are',
            'analyzed_text': analyzed}
    return render(request, 'analyze.html', dics)
<|reserved_special_token_1|>
# I Have Created this file -Nabeel
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the home page."""
    return render(request,'index.html')
def aboutme(request):
    """Respond with a link to the externally hosted About page."""
    return HttpResponse (" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
def contact(request):
    """Respond with a link to the externally hosted Contact page."""
    return HttpResponse ("<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>")
def analyze(request):
    """Apply the user-selected text transformations and render the result.

    Reads the text and the checkbox flags ('on'/'off') from request.POST.
    Enabled transformations are applied in a fixed order, each feeding its
    output into the next; the last one applied determines the 'purpose'
    label shown on the analyze.html page.
    """
    djtext = request.POST.get('text', 'default')
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')
    # Bail out first if nothing was selected; this also guarantees 'dics'
    # is always bound before the final render().
    if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
            extraspaceremover != 'on' and charcount != 'on'):
        return HttpResponse('Please Select Any Function And Try Again!')
    if removepunc == 'on':
        punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
        # str.translate drops every punctuation character in one pass.
        analyzed = djtext.translate(str.maketrans('', '', punctuations))
        dics = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed
    if fullcaps == 'on':
        analyzed = djtext.upper()
        dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
        djtext = analyzed
    if newlineremover == 'on':
        analyzed = djtext.replace('\n', '').replace('\r', '')
        dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
        djtext = analyzed
    if extraspaceremover == 'on':
        # BUGFIX: the original compared characters to the empty string '',
        # which can never match a single character, so no space was ever
        # removed. Collapse runs of spaces by skipping a space that is
        # followed by another, guarding the index + 1 lookahead.
        analyzed = ''
        for index, char in enumerate(djtext):
            if not (char == ' ' and index + 1 < len(djtext) and
                    djtext[index + 1] == ' '):
                analyzed = analyzed + char
        dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
            analyzed}
        djtext = analyzed
    if charcount == 'on':
        # The original loop overwrote the same len() result once per
        # character; a single assignment is equivalent and O(1).
        analyzed = len(djtext)
        dics = {'purpose': 'Total no. of Character in your text are',
            'analyzed_text': analyzed}
    return render(request, 'analyze.html', dics)
|
flexible
|
{
"blob_id": "512d0a293b0cc3e6f7d84bb6958dc6693acde680",
"index": 1612,
"step-1": "<mask token>\n\n\ndef aboutme(request):\n return HttpResponse(\n \" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>\")\n\n\n<mask token>\n\n\ndef analyze(request):\n djtext = request.POST.get('text', 'default')\n removepunc = request.POST.get('removepunc', 'off')\n fullcaps = request.POST.get('fullcaps', 'off')\n newlineremover = request.POST.get('newlineremover', 'off')\n extraspaceremover = request.POST.get('extraspaceremover', 'off')\n charcount = request.POST.get('charcount', 'off')\n print(removepunc)\n if removepunc == 'on':\n punctuations = '!()-[]{};:\\'\"\\\\,<>./?@#$%^&*_~'\n analyzed = ''\n for char in djtext:\n if char not in punctuations:\n analyzed = analyzed + char\n dics = {'purpose': 'Removed Punctuations', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if fullcaps == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = analyzed + char.upper()\n dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}\n djtext = analyzed\n if newlineremover == 'on':\n analyzed = ''\n for char in djtext:\n if char != '\\n' and char != '\\r':\n analyzed = analyzed + char\n else:\n print('no')\n print('pre', analyzed)\n dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}\n djtext = analyzed\n if extraspaceremover == 'on':\n analyzed = ''\n for index, char in enumerate(djtext):\n if not (djtext[index] == '' and djtext[index + 1] == ''):\n analyzed = analyzed + char\n dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if charcount == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = len(djtext)\n dics = {'purpose': 'Total no. of Character in your text are',\n 'analyzed_text': analyzed}\n if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and\n extraspaceremover != 'on' and charcount != 'on'):\n return HttpResponse('Please Select Any Function And Try Again!')\n return render(request, 'analyze.html', dics)\n",
"step-2": "<mask token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef aboutme(request):\n return HttpResponse(\n \" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>\")\n\n\n<mask token>\n\n\ndef analyze(request):\n djtext = request.POST.get('text', 'default')\n removepunc = request.POST.get('removepunc', 'off')\n fullcaps = request.POST.get('fullcaps', 'off')\n newlineremover = request.POST.get('newlineremover', 'off')\n extraspaceremover = request.POST.get('extraspaceremover', 'off')\n charcount = request.POST.get('charcount', 'off')\n print(removepunc)\n if removepunc == 'on':\n punctuations = '!()-[]{};:\\'\"\\\\,<>./?@#$%^&*_~'\n analyzed = ''\n for char in djtext:\n if char not in punctuations:\n analyzed = analyzed + char\n dics = {'purpose': 'Removed Punctuations', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if fullcaps == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = analyzed + char.upper()\n dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}\n djtext = analyzed\n if newlineremover == 'on':\n analyzed = ''\n for char in djtext:\n if char != '\\n' and char != '\\r':\n analyzed = analyzed + char\n else:\n print('no')\n print('pre', analyzed)\n dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}\n djtext = analyzed\n if extraspaceremover == 'on':\n analyzed = ''\n for index, char in enumerate(djtext):\n if not (djtext[index] == '' and djtext[index + 1] == ''):\n analyzed = analyzed + char\n dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if charcount == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = len(djtext)\n dics = {'purpose': 'Total no. 
of Character in your text are',\n 'analyzed_text': analyzed}\n if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and\n extraspaceremover != 'on' and charcount != 'on'):\n return HttpResponse('Please Select Any Function And Try Again!')\n return render(request, 'analyze.html', dics)\n",
"step-3": "<mask token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef aboutme(request):\n return HttpResponse(\n \" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>\")\n\n\ndef contact(request):\n return HttpResponse(\n \"<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>\"\n )\n\n\ndef analyze(request):\n djtext = request.POST.get('text', 'default')\n removepunc = request.POST.get('removepunc', 'off')\n fullcaps = request.POST.get('fullcaps', 'off')\n newlineremover = request.POST.get('newlineremover', 'off')\n extraspaceremover = request.POST.get('extraspaceremover', 'off')\n charcount = request.POST.get('charcount', 'off')\n print(removepunc)\n if removepunc == 'on':\n punctuations = '!()-[]{};:\\'\"\\\\,<>./?@#$%^&*_~'\n analyzed = ''\n for char in djtext:\n if char not in punctuations:\n analyzed = analyzed + char\n dics = {'purpose': 'Removed Punctuations', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if fullcaps == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = analyzed + char.upper()\n dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}\n djtext = analyzed\n if newlineremover == 'on':\n analyzed = ''\n for char in djtext:\n if char != '\\n' and char != '\\r':\n analyzed = analyzed + char\n else:\n print('no')\n print('pre', analyzed)\n dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}\n djtext = analyzed\n if extraspaceremover == 'on':\n analyzed = ''\n for index, char in enumerate(djtext):\n if not (djtext[index] == '' and djtext[index + 1] == ''):\n analyzed = analyzed + char\n dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if charcount == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = len(djtext)\n dics = {'purpose': 'Total no. 
of Character in your text are',\n 'analyzed_text': analyzed}\n if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and\n extraspaceremover != 'on' and charcount != 'on'):\n return HttpResponse('Please Select Any Function And Try Again!')\n return render(request, 'analyze.html', dics)\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef aboutme(request):\n return HttpResponse(\n \" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>\")\n\n\ndef contact(request):\n return HttpResponse(\n \"<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>\"\n )\n\n\ndef analyze(request):\n djtext = request.POST.get('text', 'default')\n removepunc = request.POST.get('removepunc', 'off')\n fullcaps = request.POST.get('fullcaps', 'off')\n newlineremover = request.POST.get('newlineremover', 'off')\n extraspaceremover = request.POST.get('extraspaceremover', 'off')\n charcount = request.POST.get('charcount', 'off')\n print(removepunc)\n if removepunc == 'on':\n punctuations = '!()-[]{};:\\'\"\\\\,<>./?@#$%^&*_~'\n analyzed = ''\n for char in djtext:\n if char not in punctuations:\n analyzed = analyzed + char\n dics = {'purpose': 'Removed Punctuations', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if fullcaps == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = analyzed + char.upper()\n dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}\n djtext = analyzed\n if newlineremover == 'on':\n analyzed = ''\n for char in djtext:\n if char != '\\n' and char != '\\r':\n analyzed = analyzed + char\n else:\n print('no')\n print('pre', analyzed)\n dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}\n djtext = analyzed\n if extraspaceremover == 'on':\n analyzed = ''\n for index, char in enumerate(djtext):\n if not (djtext[index] == '' and djtext[index + 1] == ''):\n analyzed = analyzed + char\n dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':\n analyzed}\n djtext = analyzed\n if charcount == 'on':\n analyzed = ''\n for char in djtext:\n analyzed = len(djtext)\n dics = {'purpose': 'Total no. 
of Character in your text are',\n 'analyzed_text': analyzed}\n if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and\n extraspaceremover != 'on' and charcount != 'on'):\n return HttpResponse('Please Select Any Function And Try Again!')\n return render(request, 'analyze.html', dics)\n",
"step-5": "# I Have Created this file -Nabeel\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef index(request):\n return render(request,'index.html')\n\n\ndef aboutme(request):\n return HttpResponse (\" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>\")\n\ndef contact(request):\n return HttpResponse (\"<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>\")\n\ndef analyze(request):\n #get the text\n djtext = request.POST.get('text', 'default')\n #check checkbox value\n removepunc = request.POST.get('removepunc', 'off') #on & off\n fullcaps = request.POST.get('fullcaps','off')\n newlineremover = request.POST.get('newlineremover','off')\n extraspaceremover = request.POST.get('extraspaceremover', 'off')\n charcount = request.POST.get('charcount', 'off')\n print(removepunc)\n\n #check which checkbox is on\n if removepunc == \"on\":\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n analyzed=\"\"\n for char in djtext:\n if char not in punctuations:\n analyzed=analyzed + char\n dics = {'purpose':'Removed Punctuations' , 'analyzed_text':analyzed}\n djtext=analyzed\n #return render(request,'analyze.html',dics)\n\n\n\n if (fullcaps == \"on\"):\n analyzed = \"\"\n for char in djtext:\n analyzed = analyzed + char.upper()\n\n dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}\n # Analyze the text\n djtext = analyzed\n # return render(request, 'analyze.html', dics)\n\n if (newlineremover == \"on\"):\n analyzed = \"\"\n for char in djtext:\n if char != \"\\n\" and char != \"\\r\":\n analyzed = analyzed + char\n else:\n print(\"no\")\n print(\"pre\", analyzed)\n dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}\n djtext=analyzed\n # Analyze the text\n #return render(request, 'analyze.html', dics)\n\n\n\n if (extraspaceremover == \"on\"):\n analyzed = \"\"\n for index, char in enumerate(djtext):\n if not (djtext[index] == \"\" and djtext[index+1] == \"\"):\n analyzed = 
analyzed + char\n\n dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text': analyzed}\n djtext = analyzed\n #return render(request, 'analyze.html', dics)\n\n if (charcount == \"on\"):\n analyzed = \"\"\n for char in djtext:\n analyzed = len(djtext)\n dics = {'purpose': 'Total no. of Character in your text are', 'analyzed_text': analyzed}\n if (removepunc != \"on\" and fullcaps != \"on\" and newlineremover != \"on\" and extraspaceremover != \"on\" and charcount!= \"on\"):\n\n return HttpResponse(\"Please Select Any Function And Try Again!\")\n\n return render(request, 'analyze.html', dics)\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from . import metrics
from . import matrices
from .pairwise import apply_pairwise_rect, apply_pairwise_sparse, apply_running_rect
from . import numba_tools as nb_tools
from . import running_metrics as running
__all__ = ['metrics', 'apply_pairwise_rect', 'apply_pairwise_sparse',
'apply_running_rect', 'nb_tools', 'matrices', 'running']
|
normal
|
{
"blob_id": "3605e8b8b2f8f49cc7c40fc436c147578b12091c",
"index": 6026,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['metrics', 'apply_pairwise_rect', 'apply_pairwise_sparse',\n 'apply_running_rect', 'nb_tools', 'matrices', 'running']\n",
"step-3": "from . import metrics\nfrom . import matrices\nfrom .pairwise import apply_pairwise_rect, apply_pairwise_sparse, apply_running_rect\nfrom . import numba_tools as nb_tools\nfrom . import running_metrics as running\n__all__ = ['metrics', 'apply_pairwise_rect', 'apply_pairwise_sparse',\n 'apply_running_rect', 'nb_tools', 'matrices', 'running']\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf-8
"""
Provides test-related code that can be used by all tests.
"""
import os
# Root directory holding the test fixture files.
DATA_DIR = 'tests/data'


def get_data_path(file_name):
    """Return the path of *file_name* inside the test data directory."""
    return os.path.join(DATA_DIR, file_name)
def assert_strings(test_case, actual, expected):
    """Assert *actual* equals *expected* with a readable failure message.

    The message shows both a friendly (triple-quoted) and a literal
    (repr) rendering of each string so whitespace/escape differences
    are visible in test output.
    """
    # Show both friendly and literal versions.
    message = """\
Expected: \"""%s\"""
Actual: \"""%s\"""
Expected: %s
Actual: %s""" % (expected, actual, repr(expected), repr(actual))
    # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12;
    # use the canonical assertEqual.
    test_case.assertEqual(actual, expected, message)
|
normal
|
{
"blob_id": "83d35c413af0cefb71964671b43df1e815aa2115",
"index": 3945,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n message = (\n \"\"\"\n\n Expected: \"\"\\\"%s\"\"\\\"\n Actual: \"\"\\\"%s\"\"\\\"\n\n Expected: %s\n Actual: %s\"\"\"\n % (expected, actual, repr(expected), repr(actual)))\n test_case.assertEquals(actual, expected, message)\n",
"step-3": "<mask token>\nDATA_DIR = 'tests/data'\n\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n message = (\n \"\"\"\n\n Expected: \"\"\\\"%s\"\"\\\"\n Actual: \"\"\\\"%s\"\"\\\"\n\n Expected: %s\n Actual: %s\"\"\"\n % (expected, actual, repr(expected), repr(actual)))\n test_case.assertEquals(actual, expected, message)\n",
"step-4": "<mask token>\nimport os\nDATA_DIR = 'tests/data'\n\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n message = (\n \"\"\"\n\n Expected: \"\"\\\"%s\"\"\\\"\n Actual: \"\"\\\"%s\"\"\\\"\n\n Expected: %s\n Actual: %s\"\"\"\n % (expected, actual, repr(expected), repr(actual)))\n test_case.assertEquals(actual, expected, message)\n",
"step-5": "# coding: utf-8\n\n\"\"\"\nProvides test-related code that can be used by all tests.\n\n\"\"\"\n\nimport os\n\n\nDATA_DIR = 'tests/data'\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n # Show both friendly and literal versions.\n message = \"\"\"\\\n\n\n Expected: \\\"\"\"%s\\\"\"\"\n Actual: \\\"\"\"%s\\\"\"\"\n\n Expected: %s\n Actual: %s\"\"\" % (expected, actual, repr(expected), repr(actual))\n test_case.assertEquals(actual, expected, message)\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('home', '0010_auto_20170512_2248')]
operations = [migrations.AlterField(model_name='classroom', name=
'subject5teacher', field=models.ForeignKey(default=None, on_delete=
django.db.models.deletion.CASCADE, related_name='+', to=
'home.Teacher', verbose_name='Chemistry'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('home', '0010_auto_20170512_2248')]
operations = [migrations.AlterField(model_name='classroom', name=
'subject5teacher', field=models.ForeignKey(default=None, on_delete=
django.db.models.deletion.CASCADE, related_name='+', to=
'home.Teacher', verbose_name='Chemistry'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-12 20:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: repoint ``Classroom.subject5teacher``.

    Alters the field to a CASCADE-deleting ForeignKey to ``home.Teacher``
    labelled "Chemistry"; ``related_name='+'`` disables the reverse accessor.
    Do not hand-edit the operation payload — it must match the model state.
    """

    dependencies = [
        ('home', '0010_auto_20170512_2248'),
    ]

    operations = [
        migrations.AlterField(
            model_name='classroom',
            name='subject5teacher',
            # default=None with CASCADE delete; '+' suppresses the reverse relation.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Teacher', verbose_name='Chemistry'),
        ),
    ]
|
flexible
|
{
"blob_id": "438efbaf35401a29ea5408fee3b49b85f237760e",
"index": 1089,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home', '0010_auto_20170512_2248')]\n operations = [migrations.AlterField(model_name='classroom', name=\n 'subject5teacher', field=models.ForeignKey(default=None, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to=\n 'home.Teacher', verbose_name='Chemistry'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home', '0010_auto_20170512_2248')]\n operations = [migrations.AlterField(model_name='classroom', name=\n 'subject5teacher', field=models.ForeignKey(default=None, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to=\n 'home.Teacher', verbose_name='Chemistry'))]\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# Generated by Django 1.11 on 2017-05-12 20:48\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('home', '0010_auto_20170512_2248'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='classroom',\r\n name='subject5teacher',\r\n field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Teacher', verbose_name='Chemistry'),\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Hello, world!')
<|reserved_special_token_1|>
"""
Prog: helloworld.py
Name: Samuel doyle
Date: 18/04/18
Desc: My first program!
"""
print('Hello, world!')
|
flexible
|
{
"blob_id": "513a2bbcf7a63baf900b73b18cf25618937dc7d0",
"index": 1054,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Hello, world!')\n",
"step-3": "\"\"\"\nProg: helloworld.py\nName: Samuel doyle\nDate: 18/04/18\nDesc: My first program!\n\"\"\"\n\nprint('Hello, world!')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from preprocessing import *
from utils import *
def find_optimal_param(lda, x_train, y_train):
    """Pick the probability cut-off maximizing Youden's J (Se + Sp - 1).

    Scores *x_train* with the fitted classifier *lda*, orders the labels
    by ascending predicted probability, sweeps every split position as a
    candidate threshold, and returns the probability at the best split.
    """
    scores = lda.predict_proba(x_train)[:, 1]
    # Reorder the labels by ascending score so that a threshold becomes
    # a simple split index into the label array.
    ordered_labels = np.array([lab for _, lab in sorted(zip(scores, y_train))])
    scores.sort()
    sensitivities = []
    specificities = []
    for split in range(len(scores)):
        above = ordered_labels[split:]   # predicted positive
        below = ordered_labels[:split]   # predicted negative
        tp = np.count_nonzero(above == 1)
        fp = np.count_nonzero(above == 0)
        tn = np.count_nonzero(below == 0)
        fn = np.count_nonzero(below == 1)
        sensitivities.append(tp / (tp + fn))
        specificities.append(tn / (tn + fp))
    # Youden's J = Se + Sp - 1; argmax returns the first best split.
    best = np.argmax(np.array(sensitivities) + np.array(specificities) - 1)
    return scores[best]
def predict(lda, x, y, m):
    """Count confusion-matrix cells for *lda* at probability threshold *m*.

    A sample is predicted positive when its class-1 probability exceeds
    *m*.  Returns ``(tp, fp, fn, tn)``; all zeros when *x* is empty.
    """
    tp = fp = tn = fn = 0
    if len(x) != 0:
        scores = lda.predict_proba(x)[:, 1]
        for score, label in zip(scores, y):
            predicted_positive = score > m
            if predicted_positive and label == 1:
                tp += 1
            elif predicted_positive:
                fp += 1
            elif label == 1:
                fn += 1
            else:
                tn += 1
    return tp, fp, fn, tn
from methodutils import FdaUtils
class FDA_node(object):
    """A node of a binary tree of Fisher discriminant classifiers.

    Each node fits its own FDA model (``FdaUtils``), partitions samples at
    probability threshold ``m`` via :meth:`divide_data`, and delegates each
    side to a child node when the tree has been grown.
    """

    def __init__(self):
        """Create a leaf node with a fresh FDA model and default threshold 0.5."""
        self.method = FdaUtils()
        self.left = None
        self.right = None
        self.m = 0.5

    def grow(self):
        """Attach two fresh children, turning this leaf into an internal node."""
        self.right = FDA_node()
        self.left = FDA_node()

    def find_optimal_param(self, x, y):
        """Tune threshold ``m`` on (x, y), then recurse into the children."""
        self.m = self.method.find_optimal_param(x, y)
        if self.left is not None and self.right is not None:
            left, right = self.divide_data(x)
            self.left.find_optimal_param(x[left], y[left])
            self.right.find_optimal_param(x[right], y[right])

    def fit(self, x, y):
        """Fit this node's model, then fit each child on its own partition.

        Children are pruned when one side is label-pure (all 0 on the left
        or all 1 on the right), since no further split can help there.
        """
        self.method.fit(x, y)
        if self.left is not None and self.right is not None:
            left, right = self.divide_data(x)
            if (max(y[left]) == 0 or min(y[right]) == 1):
                self.left = self.right = None
            else:
                # BUG FIX: the children were fitted on swapped partitions
                # (right child on left-side data and vice versa), which is
                # inconsistent with divide_data / predict /
                # find_optimal_param, all of which route left-mask data to
                # the left child.
                self.left.fit(x[left], y[left])
                self.right.fit(x[right], y[right])

    def divide_data(self, x):
        """Return boolean masks (left, right) splitting x at probability ``m``."""
        probs = self.method.predict_proba(x)[:, 1]
        left = (probs <= self.m)
        right = (probs > self.m)
        return left, right

    def predict(self, x):
        """Predict labels for x, delegating to the children when present."""
        if self.left is None and self.right is None:
            pred = self.method.predict(x, self.m)
        else:
            # Children always come in pairs (see grow), so a plain else is
            # safe and avoids the original's possibly-unbound `pred`.
            left, right = self.divide_data(x)
            pred = np.ones(x.shape[0]) * 2  # sentinel, fully overwritten below
            pred[left] = self.left.predict(x[left])
            pred[right] = self.right.predict(x[right])
        return pred
if __name__ == "__main__":
    np.seterr(all='raise')
    from sklearn.metrics import confusion_matrix
    from dataset import load_dataset, load_new_dataset_6002, diagnosis_to_binary, MOST_FREQ_DIAGS_NUMS_NEW
    from fisher_discriminant import FisherDiscriminantAnalisys
    num_components = 100  # number of leading PCA components fed to the tree

    # Load labels (old data set) and the precomputed feature matrix.
    with open('C:\\Users\\donte_000\\PycharmProjects\\Basic_Methods\\data\\data_old_and_new_without_noise.pkl', 'rb') as infile:
        (old, new) = pkl.load(infile)
    Y = old["y"]
    with open('C:\\Users\\donte_000\\PycharmProjects\\Basic_Methods\\data\\6002_old_Dif.pkl', 'rb') as outfile:
        X = pkl.load(outfile)

    pca = PCA(n_components=X.shape[0])
    b = pca.fit_transform(X)

    # Cross-validated evaluation of the FDA tree, one diagnosis at a time.
    for d in reversed(MOST_FREQ_DIAGS_NUMS_NEW):
        y_prediction = []
        y_labels = []
        for train_index, test_index in cross_val(b.shape[0], 500):
            tree = FDA_node()
            tree.grow()
            tree.fit(b[train_index, :num_components], Y[train_index, d])
            tree.find_optimal_param(b[train_index, :num_components], Y[train_index, d])
            y_prediction.append(tree.predict(b[test_index, :num_components]))
            y_labels.append(Y[test_index, d])
        y_prediction = np.array(y_prediction).flatten()
        y_labels = np.array(y_labels).flatten()
        tn, fp, fn, tp = confusion_matrix(y_labels, y_prediction).ravel()
        test_se = tp / (tp + fn)  # sensitivity
        test_sp = tn / (tn + fp)  # specificity
        # BUGFIX: the two values were previously swapped relative to the
        # labels (the "Se" slot printed test_sp and vice versa).
        print("Val. Se = %s, Val. Sp = %s" % (round(test_se, 4), round(test_sp, 4)))
|
normal
|
{
"blob_id": "784b51c05dc7b5e70016634e2664c9ec25b8a65a",
"index": 6506,
"step-1": "<mask token>\n\n\nclass FDA_node(object):\n <mask token>\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n <mask token>\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n <mask token>\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_optimal_param(lda, x_train, y_train):\n probs_train = lda.predict_proba(x_train)[:, 1]\n y_train = [x for _, x in sorted(zip(probs_train, y_train))]\n y_train = np.array(y_train)\n probs_train.sort()\n Se = []\n Sp = []\n for p in range(len(probs_train)):\n tp = np.count_nonzero(y_train[p:] == 1)\n fp = np.count_nonzero(y_train[p:] == 0)\n tn = np.count_nonzero(y_train[:p] == 0)\n fn = np.count_nonzero(y_train[:p] == 1)\n Se.append(tp / (tp + fn))\n Sp.append(tn / (tn + fp))\n mx = np.argmax(-(1 - np.array(Sp) - np.array(Se)))\n return probs_train[mx]\n\n\ndef predict(lda, x, y, m):\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n if len(x) != 0:\n probs = lda.predict_proba(x)[:, 1]\n for j in range(len(x)):\n if probs[j] > m:\n if y[j] == 1:\n tp += 1\n else:\n fp += 1\n elif y[j] == 1:\n fn += 1\n else:\n tn += 1\n return tp, fp, fn, tn\n\n\n<mask token>\n\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n\n def divide_data(self, x):\n probs = self.method.predict_proba(x)[:, 1]\n left = probs <= self.m\n right = probs > self.m\n return left, right\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n 
left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\n<mask token>\n",
"step-4": "import numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom preprocessing import *\nfrom utils import *\n\n\ndef find_optimal_param(lda, x_train, y_train):\n probs_train = lda.predict_proba(x_train)[:, 1]\n y_train = [x for _, x in sorted(zip(probs_train, y_train))]\n y_train = np.array(y_train)\n probs_train.sort()\n Se = []\n Sp = []\n for p in range(len(probs_train)):\n tp = np.count_nonzero(y_train[p:] == 1)\n fp = np.count_nonzero(y_train[p:] == 0)\n tn = np.count_nonzero(y_train[:p] == 0)\n fn = np.count_nonzero(y_train[:p] == 1)\n Se.append(tp / (tp + fn))\n Sp.append(tn / (tn + fp))\n mx = np.argmax(-(1 - np.array(Sp) - np.array(Se)))\n return probs_train[mx]\n\n\ndef predict(lda, x, y, m):\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n if len(x) != 0:\n probs = lda.predict_proba(x)[:, 1]\n for j in range(len(x)):\n if probs[j] > m:\n if y[j] == 1:\n tp += 1\n else:\n fp += 1\n elif y[j] == 1:\n fn += 1\n else:\n tn += 1\n return tp, fp, fn, tn\n\n\nfrom methodutils import FdaUtils\n\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n\n def divide_data(self, x):\n probs = self.method.predict_proba(x)[:, 1]\n left = probs <= self.m\n right = probs > self.m\n 
return left, right\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\nif __name__ == '__main__':\n np.seterr(all='raise')\n from sklearn.metrics import confusion_matrix\n from dataset import load_dataset, load_new_dataset_6002, diagnosis_to_binary, MOST_FREQ_DIAGS_NUMS_NEW\n from fisher_discriminant import FisherDiscriminantAnalisys\n num_components = 100\n infile = open(\n 'C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\data_old_and_new_without_noise.pkl'\n , 'rb')\n old, new = pkl.load(infile)\n infile.close()\n Y = old['y']\n outfile = open(\n 'C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\6002_old_Dif.pkl'\n , 'rb')\n X = pkl.load(outfile)\n outfile.close()\n pca = PCA(n_components=X.shape[0])\n b = pca.fit_transform(X)\n for d in reversed(MOST_FREQ_DIAGS_NUMS_NEW):\n y_prediction = []\n y_labels = []\n for train_index, test_index in cross_val(b.shape[0], 500):\n tree = FDA_node()\n tree.grow()\n tree.fit(b[train_index, :num_components], Y[train_index, d])\n tree.find_optimal_param(b[train_index, :num_components], Y[\n train_index, d])\n y_prediction.append(tree.predict(b[test_index, :num_components]))\n y_labels.append(Y[test_index, d])\n y_prediction = np.array(y_prediction).flatten()\n y_labels = np.array(y_labels).flatten()\n tn, fp, fn, tp = confusion_matrix(y_labels, y_prediction).ravel()\n test_se = tp / (tp + fn)\n test_sp = tn / (tn + fp)\n print('Val. Se = %s, Val. Sp = %s' % (round(test_sp, 4), round(\n test_se, 4)))\n",
"step-5": "import numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nfrom preprocessing import *\nfrom utils import *\n\n\ndef find_optimal_param(lda, x_train, y_train):\n\n probs_train = lda.predict_proba(x_train)[:, 1]\n\n y_train = [x for _,x in sorted(zip(probs_train,y_train))]\n y_train = np.array(y_train)\n probs_train.sort()\n Se = []\n Sp = []\n for p in range(len(probs_train)):\n tp = np.count_nonzero(y_train[p:] == 1)\n fp = np.count_nonzero(y_train[p:] == 0)\n tn = np.count_nonzero(y_train[:p] == 0)\n fn = np.count_nonzero(y_train[:p] == 1)\n Se.append(tp/(tp+fn))\n Sp.append(tn/(tn+fp))\n\n mx = np.argmax(-(1-np.array(Sp) - np.array(Se)))\n\n return probs_train[mx]\n\ndef predict(lda, x, y, m):\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n if len(x) != 0:\n probs= lda.predict_proba(x)[:, 1]\n\n for j in range(len(x)):\n if probs[j] > m:\n if y[j] == 1:\n tp+=1\n else:\n fp+=1\n else:\n if y[j] == 1:\n fn +=1\n else:\n tn +=1\n\n return tp, fp, fn, tn\n\nfrom methodutils import FdaUtils\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n\n\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n\n def fit(self, x, y):\n self.method.fit(x, y)\n\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if (max(y[left]) == 0 or min(y[right]) == 1):\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n\n\n def divide_data(self, x):\n probs = self.method.predict_proba(x)[:, 1]\n left = (probs <= self.m)\n right = (probs 
> self.m)\n return left, right\n\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred =self.right.predict(x[right])\n pred = np.ones(x.shape[0])*2\n pred[left] = l_pred\n pred[right] = r_pred\n\n return pred\n\n\n\nif __name__ == \"__main__\":\n np.seterr(all='raise')\n from sklearn.metrics import confusion_matrix\n from dataset import load_dataset, load_new_dataset_6002, diagnosis_to_binary, MOST_FREQ_DIAGS_NUMS_NEW\n from fisher_discriminant import FisherDiscriminantAnalisys\n num_components = 100\n\n infile = open('C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\data_old_and_new_without_noise.pkl', 'rb')\n (old, new) = pkl.load(infile)\n infile.close()\n\n Y = old[\"y\"]\n outfile = open('C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\6002_old_Dif.pkl', 'rb')\n X = pkl.load(outfile)\n outfile.close()\n pca = PCA(n_components=X.shape[0])\n b = pca.fit_transform(X)\n\n\n\n for d in reversed(MOST_FREQ_DIAGS_NUMS_NEW):\n y_prediction =[]\n y_labels = []\n for train_index, test_index in cross_val(b.shape[0], 500):\n tree = FDA_node()\n tree.grow()\n tree.fit(b[train_index, :num_components],Y[train_index,d])\n tree.find_optimal_param(b[train_index, :num_components], Y[train_index,d])\n\n y_prediction.append(tree.predict(b[test_index, :num_components]))\n y_labels.append(Y[test_index, d])\n\n y_prediction = np.array(y_prediction).flatten()\n y_labels = np.array(y_labels).flatten()\n tn, fp, fn, tp = confusion_matrix(y_labels, y_prediction).ravel()\n\n test_se = tp / (tp + fn)\n test_sp = tn / (tn + fp)\n print(\"Val. Se = %s, Val. Sp = %s\" % (round(test_sp, 4), round(test_se, 4)))\n",
"step-ids": [
5,
6,
9,
11,
12
]
}
|
[
5,
6,
9,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('TRIANGULO: {:.3f}'.format(t))
<|reserved_special_token_0|>
print('CIRCULO: {:.3f}'.format(pi * c ** 2))
print('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))
print('QUADRADO: {:.3f}'.format(b ** 2))
print('RETANGULO: {:.3f}'.format(a * b))
<|reserved_special_token_1|>
linha = input().split()
a = float(linha[0])
b = float(linha[1])
c = float(linha[2])
t = a * c / 2
print('TRIANGULO: {:.3f}'.format(t))
pi = 3.14159
print('CIRCULO: {:.3f}'.format(pi * c ** 2))
print('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))
print('QUADRADO: {:.3f}'.format(b ** 2))
print('RETANGULO: {:.3f}'.format(a * b))
<|reserved_special_token_1|>
# Read the three measurements A, B and C from a single input line.
a, b, c = (float(token) for token in input().split()[:3])

# Area of the right triangle with base A and height C.
print('TRIANGULO: {:.3f}'.format((a * c) / 2))

pi = 3.14159  # fixed constant required by the problem statement

# Area of the circle of radius C.
print('CIRCULO: {:.3f}'.format(pi * c ** 2))
# Area of the trapezium with parallel sides A and B and height C.
print('TRAPEZIO: {:.3f}'.format(((a + b) * c) / 2))
# Area of the square with side B.
print('QUADRADO: {:.3f}'.format(b ** 2))
# Area of the rectangle with sides A and B.
print('RETANGULO: {:.3f}'.format(a * b))
|
flexible
|
{
"blob_id": "d44d9003e9b86722a0fc1dfe958de462db9cd5f1",
"index": 1670,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('TRIANGULO: {:.3f}'.format(t))\n<mask token>\nprint('CIRCULO: {:.3f}'.format(pi * c ** 2))\nprint('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))\nprint('QUADRADO: {:.3f}'.format(b ** 2))\nprint('RETANGULO: {:.3f}'.format(a * b))\n",
"step-3": "linha = input().split()\na = float(linha[0])\nb = float(linha[1])\nc = float(linha[2])\nt = a * c / 2\nprint('TRIANGULO: {:.3f}'.format(t))\npi = 3.14159\nprint('CIRCULO: {:.3f}'.format(pi * c ** 2))\nprint('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))\nprint('QUADRADO: {:.3f}'.format(b ** 2))\nprint('RETANGULO: {:.3f}'.format(a * b))\n",
"step-4": "linha = input().split()\n\na = float(linha[0])\nb = float(linha[1])\nc = float(linha[2])\n\nt = (a*c)/2\n\nprint('TRIANGULO: {:.3f}'.format(t))\n\npi = 3.14159\n\nprint(\"CIRCULO: {:.3f}\".format(pi*c**2))\n\nprint('TRAPEZIO: {:.3f}'.format( ((a+b)*c)/2 ))\n\nprint(\"QUADRADO: {:.3f}\".format(b**2))\n\nprint(\"RETANGULO: {:.3f}\".format(a*b))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('D:\\Desktop\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:
df = pd.read_csv(data_obj)
<|reserved_special_token_0|>
pd.set_option('display.max_columns', 1000)
<|reserved_special_token_0|>
for i in range(len(Y)):
y = 0
if Y['好瓜_是'][i] == 1:
y = 1
ds.appendLinked(X.ix[i], y)
ds.calculateStatistics()
<|reserved_special_token_0|>
for n in range(testdata_temp.getLength()):
testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.
getSample(n)[1])
print(testdata)
testdata._convertToOneOfMany()
print(testdata)
<|reserved_special_token_0|>
for n in range(traindata_temp.getLength()):
traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.
getSample(n)[1])
traindata._convertToOneOfMany()
<|reserved_special_token_0|>
for i in range(50):
trainer_sd.trainEpochs(1)
from pybrain.utilities import percentError
trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']
)
testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),
testdata['class'])
print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,
'test error: ', testresult)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('D:\\Desktop\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:
df = pd.read_csv(data_obj)
<|reserved_special_token_0|>
dataset = pd.get_dummies(df)
pd.set_option('display.max_columns', 1000)
X = dataset[dataset.columns[:-2]]
Y = dataset[dataset.columns[-2:]]
labels = dataset.columns._data[-2:]
<|reserved_special_token_0|>
ds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for i in range(len(Y)):
y = 0
if Y['好瓜_是'][i] == 1:
y = 1
ds.appendLinked(X.ix[i], y)
ds.calculateStatistics()
testdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
testdata_temp, traindata_temp = ds.splitWithProportion(0.25)
for n in range(testdata_temp.getLength()):
testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.
getSample(n)[1])
print(testdata)
testdata._convertToOneOfMany()
print(testdata)
traindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for n in range(traindata_temp.getLength()):
traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.
getSample(n)[1])
traindata._convertToOneOfMany()
<|reserved_special_token_0|>
n_h = 5
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)
<|reserved_special_token_0|>
trainer_sd = BackpropTrainer(net, traindata)
for i in range(50):
trainer_sd.trainEpochs(1)
from pybrain.utilities import percentError
trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']
)
testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),
testdata['class'])
print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,
'test error: ', testresult)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pandas as pd
with open('D:\\Desktop\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:
df = pd.read_csv(data_obj)
<|reserved_special_token_0|>
dataset = pd.get_dummies(df)
pd.set_option('display.max_columns', 1000)
X = dataset[dataset.columns[:-2]]
Y = dataset[dataset.columns[-2:]]
labels = dataset.columns._data[-2:]
from pybrain.datasets import ClassificationDataSet
ds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for i in range(len(Y)):
y = 0
if Y['好瓜_是'][i] == 1:
y = 1
ds.appendLinked(X.ix[i], y)
ds.calculateStatistics()
testdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
testdata_temp, traindata_temp = ds.splitWithProportion(0.25)
for n in range(testdata_temp.getLength()):
testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.
getSample(n)[1])
print(testdata)
testdata._convertToOneOfMany()
print(testdata)
traindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for n in range(traindata_temp.getLength()):
traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.
getSample(n)[1])
traindata._convertToOneOfMany()
<|reserved_special_token_0|>
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SoftmaxLayer
n_h = 5
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)
from pybrain.supervised import BackpropTrainer
trainer_sd = BackpropTrainer(net, traindata)
for i in range(50):
trainer_sd.trainEpochs(1)
from pybrain.utilities import percentError
trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']
)
testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),
testdata['class'])
print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,
'test error: ', testresult)
<|reserved_special_token_1|>
'''Load the watermelon 3.0 data set, preprocess it, and train a BP network.'''
# Step 1: load the data
import pandas as pd
with open('D:\\Desktop\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:
    df = pd.read_csv(data_obj)
# Step 2: preprocess the data
# One-hot encode the discrete attributes (qualitative -> quantitative),
# turning every distinct value of a feature into a new feature.
# Categorical Variable -> Dummy Variable; two approaches exist:
# Dummy Encoding vs One Hot Encoding.
# Common point: both turn a categorical variable into quantitative features.
# Difference: dummy encoding maps a categorical variable to n-1 feature
# columns, while one-hot encoding maps it to n columns (and therefore risks
# the dummy-variable trap).
# pandas' get_dummies() converts all nominal variables of a data set at once.
# sklearn's OneHotEncoder can do the same (non-numeric values must first be
# encoded as numbers via LabelEncoder, and it only handles one column at a time).
# pybrain's _convertToOneOfMany() converts the target classes to a 1-of-k
# representation, retaining the old targets as a field "class";
# i.e. it one-hot encodes the target class while keeping the original labels.
'''
dataset = pd.get_dummies(df, columns=df.columns[:6]) # 将离散属性变为哑变量
dataset = pd.get_dummies(dataset, columns=[df.columns[8]]) # 将标签转为哑变量
# columns接受序列形式的对象,单个字符串不行
'''
dataset = pd.get_dummies(df)
pd.set_option('display.max_columns', 1000) # show all columns when printing
X = dataset[dataset.columns[:-2]]
Y = dataset[dataset.columns[-2:]]
labels = dataset.columns._data[-2:]
# Step 3: wrap the data in a SupervisedDataSet/ClassificationDataSet object
from pybrain.datasets import ClassificationDataSet
ds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for i in range(len(Y)):
    y = 0
    if Y['好瓜_是'][i] == 1:
        y = 1
    ds.appendLinked(X.ix[i], y)
ds.calculateStatistics() # returns a class histogram? original author was unsure of its purpose — TODO confirm
# Step 4: split into test and training sets
testdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
testdata_temp, traindata_temp = ds.splitWithProportion(0.25)
for n in range(testdata_temp.getLength()):
    testdata.appendLinked(testdata_temp.getSample(n)[0],testdata_temp.getSample(n)[1])
print(testdata)
testdata._convertToOneOfMany()
print(testdata)
traindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)
for n in range(traindata_temp.getLength()):
    traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.getSample(n)[1])
traindata._convertToOneOfMany()
'''
# 使用sklean的OneHotEncoder
# 缺点是只能单列进行操作,最后再复合,麻烦
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
a = LabelEncoder().fit_transform(df[df.columns[0]])
# dataset_One = OneHotEncoder.fit(df.values[])
# print(df['色泽']) # 单独的Series?
print(a)
aaa = OneHotEncoder(sparse=False).fit_transform(a.reshape(-1, 1))
print(aaa)
# 怎么复合暂时没写
'''
'''开始整神经网络'''
# Step 1: build the network architecture
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SoftmaxLayer
# Input is 19-dimensional, output is 2-dimensional, hidden layer size is 5.
# The output layer uses Softmax; other options: learningrate=0.01,
# lrdecay=1.0 (the learning rate is multiplied by this after each step),
# verbose=False, momentum=0 (gradient of the last time step?), weightdecay=0.0
n_h = 5
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)
# Step 2: standard (per-sample) BP training via a feed-forward trainer
from pybrain.supervised import BackpropTrainer
trainer_sd = BackpropTrainer(net, traindata)
# # Alternative: accumulated (batch) BP, trained for 50 epochs.
# trainer_ac = BackpropTrainer(net, traindata, batchlearning=True)
# trainer_ac.trainEpochs(50)
# err_train, err_valid = trainer_ac.trainUntilConvergence(maxEpochs=50)
for i in range(50): # train for 50 epochs, printing train/test error after each
    trainer_sd.trainEpochs(1) # run one training epoch
    # compute training and test error rates
    from pybrain.utilities import percentError
    trainresult = percentError(trainer_sd.testOnClassData(), traindata['class'])
    testresult = percentError(trainer_sd.testOnClassData(dataset=testdata), testdata['class'])
    # print the error rates
    # NOTE(review): '%d' is never interpolated here — print outputs the
    # literal string and the epoch count as separate arguments.
    print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult, 'test error: ', testresult)
|
flexible
|
{
"blob_id": "682b3e1d6d40f4b279052ac27df19268d227fef8",
"index": 6899,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('D:\\\\Desktop\\\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n<mask token>\npd.set_option('display.max_columns', 1000)\n<mask token>\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics()\n<mask token>\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.\n getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\n<mask token>\nfor n in range(traindata_temp.getLength()):\n traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.\n getSample(n)[1])\ntraindata._convertToOneOfMany()\n<mask token>\nfor i in range(50):\n trainer_sd.trainEpochs(1)\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']\n )\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),\n testdata['class'])\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,\n 'test error: ', testresult)\n",
"step-3": "<mask token>\nwith open('D:\\\\Desktop\\\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n<mask token>\ndataset = pd.get_dummies(df)\npd.set_option('display.max_columns', 1000)\nX = dataset[dataset.columns[:-2]]\nY = dataset[dataset.columns[-2:]]\nlabels = dataset.columns._data[-2:]\n<mask token>\nds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics()\ntestdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\ntestdata_temp, traindata_temp = ds.splitWithProportion(0.25)\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.\n getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\ntraindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor n in range(traindata_temp.getLength()):\n traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.\n getSample(n)[1])\ntraindata._convertToOneOfMany()\n<mask token>\nn_h = 5\nnet = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)\n<mask token>\ntrainer_sd = BackpropTrainer(net, traindata)\nfor i in range(50):\n trainer_sd.trainEpochs(1)\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']\n )\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),\n testdata['class'])\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,\n 'test error: ', testresult)\n",
"step-4": "<mask token>\nimport pandas as pd\nwith open('D:\\\\Desktop\\\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n<mask token>\ndataset = pd.get_dummies(df)\npd.set_option('display.max_columns', 1000)\nX = dataset[dataset.columns[:-2]]\nY = dataset[dataset.columns[-2:]]\nlabels = dataset.columns._data[-2:]\nfrom pybrain.datasets import ClassificationDataSet\nds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics()\ntestdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\ntestdata_temp, traindata_temp = ds.splitWithProportion(0.25)\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0], testdata_temp.\n getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\ntraindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor n in range(traindata_temp.getLength()):\n traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.\n getSample(n)[1])\ntraindata._convertToOneOfMany()\n<mask token>\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.structure import SoftmaxLayer\nn_h = 5\nnet = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)\nfrom pybrain.supervised import BackpropTrainer\ntrainer_sd = BackpropTrainer(net, traindata)\nfor i in range(50):\n trainer_sd.trainEpochs(1)\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class']\n )\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata),\n testdata['class'])\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult,\n 'test error: ', testresult)\n",
"step-5": "'''引入数据,并对数据进行预处理'''\n\n# step 1 引入数据\nimport pandas as pd\nwith open('D:\\\\Desktop\\西瓜数据集3.0.csv', 'r', encoding='utf-8') as data_obj:\n df = pd.read_csv(data_obj)\n\n# Step 2 对数据进行预处理\n# 对离散属性进行独热编码,定性转为定量,使每一个特征的取值作为一个新的特征\n# 增加特征量 Catagorical Variable -> Dummy Variable\n# 两种方法:Dummy Encoding VS One Hot Encoding\n# 相同点:将Catagorical Variable转换为定量特征\n# 不同点:Dummy Variable将Catagorical Variable转为n-1个特征变量\n# One Hot Encoding 将其转换为n个特征变量,但会存在哑变量陷阱问题\n# pandas自带的get_dummies()函数,可以将数据集中的所有标称变量转为哑变量\n# sklearn 中的OneHotEncoder 也可以实现标称变量转为哑变量(注意要将非数字型提前通过LabelEncoder编码为数字类型,再进行转换,且只能处理单列属性)\n# pybrain中的_convertToOneOfMany()可以Converts the target classes to a 1-of-k representation, retaining the old targets as a field class.\n # 对target class独热编码,并且保留原target为字段类\n'''\ndataset = pd.get_dummies(df, columns=df.columns[:6]) # 将离散属性变为哑变量\ndataset = pd.get_dummies(dataset, columns=[df.columns[8]]) # 将标签转为哑变量\n # columns接受序列形式的对象,单个字符串不行\n'''\ndataset = pd.get_dummies(df)\npd.set_option('display.max_columns', 1000) # 把所有的列全部显示出来\n\nX = dataset[dataset.columns[:-2]]\nY = dataset[dataset.columns[-2:]]\nlabels = dataset.columns._data[-2:]\n\n# Step 3:将数据转换为SupervisedDataSet/ClassificationDtaSet对象\nfrom pybrain.datasets import ClassificationDataSet\nds = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor i in range(len(Y)):\n y = 0\n if Y['好瓜_是'][i] == 1:\n y = 1\n ds.appendLinked(X.ix[i], y)\nds.calculateStatistics() # 返回一个类直方图?搞不懂在做什么\n\n# Step 4: 分开测试集和训练集\ntestdata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\ntestdata_temp, traindata_temp = ds.splitWithProportion(0.25)\nfor n in range(testdata_temp.getLength()):\n testdata.appendLinked(testdata_temp.getSample(n)[0],testdata_temp.getSample(n)[1])\nprint(testdata)\ntestdata._convertToOneOfMany()\nprint(testdata)\ntraindata = ClassificationDataSet(19, 1, nb_classes=2, class_labels=labels)\nfor n in range(traindata_temp.getLength()):\n 
traindata.appendLinked(traindata_temp.getSample(n)[0], traindata_temp.getSample(n)[1])\ntraindata._convertToOneOfMany()\n'''\n# 使用sklean的OneHotEncoder\n# 缺点是只能单列进行操作,最后再复合,麻烦\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import LabelEncoder\na = LabelEncoder().fit_transform(df[df.columns[0]])\n# dataset_One = OneHotEncoder.fit(df.values[])\n# print(df['色泽']) # 单独的Series?\nprint(a)\naaa = OneHotEncoder(sparse=False).fit_transform(a.reshape(-1, 1))\nprint(aaa)\n# 怎么复合暂时没写\n'''\n\n'''开始整神经网络'''\n\n# Step 1 :创建神经网络框架\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.structure import SoftmaxLayer\n# 输入数据是 19维,输出是两维,隐层设置为5层\n# 输出层使用Softmax激活,其他:学习率(learningrate=0.01),学习率衰减(lrdecay=1.0,每次训练一步学习率乘以),\n# 详细(verbose=False)动量因子(momentum=0最后时步的梯度?),权值衰减?(weightdecay=0.0)\n\nn_h = 5\nnet = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)\n\n# Step 2 : 构建前馈网络标准BP算法\nfrom pybrain.supervised import BackpropTrainer\ntrainer_sd = BackpropTrainer(net, traindata)\n\n# # 或者使用累积BP算法,训练次数50次\n# trainer_ac = BackpropTrainer(net, traindata, batchlearning=True)\n# trainer_ac.trainEpochs(50)\n# err_train, err_valid = trainer_ac.trainUntilConvergence(maxEpochs=50)\n\nfor i in range(50): # 训练50次,每及测试结果次打印训练结果\n trainer_sd.trainEpochs(1) # 训练网络一次,\n\n # 引入训练误差和测试误差\n from pybrain.utilities import percentError\n trainresult = percentError(trainer_sd.testOnClassData(), traindata['class'])\n testresult = percentError(trainer_sd.testOnClassData(dataset=testdata), testdata['class'])\n # 打印错误率\n print('Epoch: %d', trainer_sd.totalepochs, 'train error: ', trainresult, 'test error: ', testresult)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AchievementGrant(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AchievementGrant(object):
def on_post(self, req, resp):
"""
Prideleni achievementu
Format dat:
{
"users": [ id ],
"task": (null|id),
"achievement": id
}
"""
try:
user = req.context['user']
data = json.loads(req.stream.read().decode('utf-8'))
if not user.is_logged_in() or not user.is_org():
resp.status = falcon.HTTP_400
return
errors = []
req.context['result'] = {'errors': [{'status': '401', 'title':
'Unauthorized', 'detail': 'Přístup odepřen.'}]}
for u in data['users']:
if not data['task']:
data['task'] = None
else:
evl = session.query(model.Evaluation).filter(model.
Evaluation.user == u).join(model.Module, model.
Module.id == model.Evaluation.module).filter(model.
Module.task == data['task']).first()
if not evl:
errors.append({'title': 'Uživatel ' + str(u) +
""" neodevzdal vybranou úlohu
"""})
continue
if session.query(model.UserAchievement).get((u, data[
'achievement'])):
errors.append({'title': 'Uživateli ' + str(u) +
' je již trofej přidělena\n'})
else:
ua = model.UserAchievement(user_id=u, achievement_id=
data['achievement'], task_id=data['task'])
session.add(ua)
session.commit()
if len(errors) > 0:
req.context['result'] = {'errors': errors}
else:
req.context['result'] = {}
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
<|reserved_special_token_1|>
import falcon
import json
from sqlalchemy.exc import SQLAlchemyError
from db import session
import model
import util
class AchievementGrant(object):
def on_post(self, req, resp):
"""
Prideleni achievementu
Format dat:
{
"users": [ id ],
"task": (null|id),
"achievement": id
}
"""
try:
user = req.context['user']
data = json.loads(req.stream.read().decode('utf-8'))
if not user.is_logged_in() or not user.is_org():
resp.status = falcon.HTTP_400
return
errors = []
req.context['result'] = {'errors': [{'status': '401', 'title':
'Unauthorized', 'detail': 'Přístup odepřen.'}]}
for u in data['users']:
if not data['task']:
data['task'] = None
else:
evl = session.query(model.Evaluation).filter(model.
Evaluation.user == u).join(model.Module, model.
Module.id == model.Evaluation.module).filter(model.
Module.task == data['task']).first()
if not evl:
errors.append({'title': 'Uživatel ' + str(u) +
""" neodevzdal vybranou úlohu
"""})
continue
if session.query(model.UserAchievement).get((u, data[
'achievement'])):
errors.append({'title': 'Uživateli ' + str(u) +
' je již trofej přidělena\n'})
else:
ua = model.UserAchievement(user_id=u, achievement_id=
data['achievement'], task_id=data['task'])
session.add(ua)
session.commit()
if len(errors) > 0:
req.context['result'] = {'errors': errors}
else:
req.context['result'] = {}
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
<|reserved_special_token_1|>
import falcon
import json
from sqlalchemy.exc import SQLAlchemyError
from db import session
import model
import util
class AchievementGrant(object):
def on_post(self, req, resp):
"""
Prideleni achievementu
Format dat:
{
"users": [ id ],
"task": (null|id),
"achievement": id
}
"""
try:
user = req.context['user']
data = json.loads(req.stream.read().decode('utf-8'))
if (not user.is_logged_in()) or (not user.is_org()):
resp.status = falcon.HTTP_400
return
errors = []
req.context['result'] = {
'errors': [{
'status': '401',
'title': 'Unauthorized',
'detail': 'Přístup odepřen.'
}]
}
for u in data['users']:
if not data['task']:
data['task'] = None
else:
evl = session.query(model.Evaluation).\
filter(model.Evaluation.user == u).\
join(model.Module,
model.Module.id == model.Evaluation.module).\
filter(model.Module.task == data['task']).\
first()
if not evl:
errors.append({
'title': ("Uživatel " + str(u) +
" neodevzdal vybranou úlohu\n")
})
continue
if session.query(model.UserAchievement).\
get((u, data['achievement'])):
errors.append({
'title': ("Uživateli " + str(u) +
" je již trofej přidělena\n")
})
else:
ua = model.UserAchievement(
user_id=u,
achievement_id=data['achievement'],
task_id=data['task']
)
session.add(ua)
session.commit()
if len(errors) > 0:
req.context['result'] = {'errors': errors}
else:
req.context['result'] = {}
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
|
flexible
|
{
"blob_id": "89ec04280ecfdfcba1923e2742e31d34750f894f",
"index": 4536,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AchievementGrant(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AchievementGrant(object):\n\n def on_post(self, req, resp):\n \"\"\"\n Prideleni achievementu\n\n Format dat:\n {\n \"users\": [ id ],\n \"task\": (null|id),\n \"achievement\": id\n }\n\n \"\"\"\n try:\n user = req.context['user']\n data = json.loads(req.stream.read().decode('utf-8'))\n if not user.is_logged_in() or not user.is_org():\n resp.status = falcon.HTTP_400\n return\n errors = []\n req.context['result'] = {'errors': [{'status': '401', 'title':\n 'Unauthorized', 'detail': 'Přístup odepřen.'}]}\n for u in data['users']:\n if not data['task']:\n data['task'] = None\n else:\n evl = session.query(model.Evaluation).filter(model.\n Evaluation.user == u).join(model.Module, model.\n Module.id == model.Evaluation.module).filter(model.\n Module.task == data['task']).first()\n if not evl:\n errors.append({'title': 'Uživatel ' + str(u) +\n \"\"\" neodevzdal vybranou úlohu\n\"\"\"})\n continue\n if session.query(model.UserAchievement).get((u, data[\n 'achievement'])):\n errors.append({'title': 'Uživateli ' + str(u) +\n ' je již trofej přidělena\\n'})\n else:\n ua = model.UserAchievement(user_id=u, achievement_id=\n data['achievement'], task_id=data['task'])\n session.add(ua)\n session.commit()\n if len(errors) > 0:\n req.context['result'] = {'errors': errors}\n else:\n req.context['result'] = {}\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n",
"step-4": "import falcon\nimport json\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom db import session\nimport model\nimport util\n\n\nclass AchievementGrant(object):\n\n def on_post(self, req, resp):\n \"\"\"\n Prideleni achievementu\n\n Format dat:\n {\n \"users\": [ id ],\n \"task\": (null|id),\n \"achievement\": id\n }\n\n \"\"\"\n try:\n user = req.context['user']\n data = json.loads(req.stream.read().decode('utf-8'))\n if not user.is_logged_in() or not user.is_org():\n resp.status = falcon.HTTP_400\n return\n errors = []\n req.context['result'] = {'errors': [{'status': '401', 'title':\n 'Unauthorized', 'detail': 'Přístup odepřen.'}]}\n for u in data['users']:\n if not data['task']:\n data['task'] = None\n else:\n evl = session.query(model.Evaluation).filter(model.\n Evaluation.user == u).join(model.Module, model.\n Module.id == model.Evaluation.module).filter(model.\n Module.task == data['task']).first()\n if not evl:\n errors.append({'title': 'Uživatel ' + str(u) +\n \"\"\" neodevzdal vybranou úlohu\n\"\"\"})\n continue\n if session.query(model.UserAchievement).get((u, data[\n 'achievement'])):\n errors.append({'title': 'Uživateli ' + str(u) +\n ' je již trofej přidělena\\n'})\n else:\n ua = model.UserAchievement(user_id=u, achievement_id=\n data['achievement'], task_id=data['task'])\n session.add(ua)\n session.commit()\n if len(errors) > 0:\n req.context['result'] = {'errors': errors}\n else:\n req.context['result'] = {}\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n",
"step-5": "import falcon\nimport json\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom db import session\nimport model\nimport util\n\n\nclass AchievementGrant(object):\n\n def on_post(self, req, resp):\n \"\"\"\n Prideleni achievementu\n\n Format dat:\n {\n \"users\": [ id ],\n \"task\": (null|id),\n \"achievement\": id\n }\n\n \"\"\"\n try:\n user = req.context['user']\n data = json.loads(req.stream.read().decode('utf-8'))\n\n if (not user.is_logged_in()) or (not user.is_org()):\n resp.status = falcon.HTTP_400\n return\n\n errors = []\n req.context['result'] = {\n 'errors': [{\n 'status': '401',\n 'title': 'Unauthorized',\n 'detail': 'Přístup odepřen.'\n }]\n }\n\n for u in data['users']:\n if not data['task']:\n data['task'] = None\n else:\n evl = session.query(model.Evaluation).\\\n filter(model.Evaluation.user == u).\\\n join(model.Module,\n model.Module.id == model.Evaluation.module).\\\n filter(model.Module.task == data['task']).\\\n first()\n\n if not evl:\n errors.append({\n 'title': (\"Uživatel \" + str(u) +\n \" neodevzdal vybranou úlohu\\n\")\n })\n continue\n\n if session.query(model.UserAchievement).\\\n get((u, data['achievement'])):\n errors.append({\n 'title': (\"Uživateli \" + str(u) +\n \" je již trofej přidělena\\n\")\n })\n else:\n ua = model.UserAchievement(\n user_id=u,\n achievement_id=data['achievement'],\n task_id=data['task']\n )\n session.add(ua)\n\n session.commit()\n if len(errors) > 0:\n req.context['result'] = {'errors': errors}\n else:\n req.context['result'] = {}\n\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!../virtual_env/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from models.base import metadata
from sqlalchemy import create_engine
import os.path
engine = create_engine(SQLALCHEMY_DATABASE_URI)
metadata.create_all(engine)
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
|
normal
|
{
"blob_id": "9bbf0953d228c970764b8ba94675346820bc5d90",
"index": 3006,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmetadata.create_all(engine)\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,\n api.version(SQLALCHEMY_MIGRATE_REPO))\n",
"step-3": "<mask token>\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\nmetadata.create_all(engine)\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,\n api.version(SQLALCHEMY_MIGRATE_REPO))\n",
"step-4": "from migrate.versioning import api\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom config import SQLALCHEMY_MIGRATE_REPO\nfrom models.base import metadata\nfrom sqlalchemy import create_engine\nimport os.path\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\nmetadata.create_all(engine)\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,\n api.version(SQLALCHEMY_MIGRATE_REPO))\n",
"step-5": "#!../virtual_env/bin/python\nfrom migrate.versioning import api\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom config import SQLALCHEMY_MIGRATE_REPO\nfrom models.base import metadata\nfrom sqlalchemy import create_engine\n\nimport os.path\n\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\n\nmetadata.create_all(engine)\n\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
pattern1 = r"[:]{2}[A-Z][a-z]{2,}[:]{2}|[\*]{2}[a-zA-Z]{3,}[\*]{2}"
pattern2 = r"([0-9]+)"
data = input()
valid_emojis = re.findall(pattern1, data)
numbers_ascii = re.findall(pattern2, data)
numbers_total = ""
for num in numbers_ascii:
numbers_total += num
cool_threshold = 1
for i in numbers_total:
i = int(i)
cool_threshold *= i
print(f"Cool threshold: {cool_threshold}")
cool_emoji = []
for j in valid_emojis:
sum_ch = 0
for ch in j:
if ch == "*" or ch == ":":
continue
sum_ch += ord(ch)
if sum_ch > cool_threshold:
cool_emoji.append(j)
print(f"{len(valid_emojis)} emojis found in the text. The cool ones are:")
print(*cool_emoji,sep='\n')
|
normal
|
{
"blob_id": "c2201a281ccd0833b0d7d2219d97ce3175fb012b",
"index": 2042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor num in numbers_ascii:\n numbers_total += num\n<mask token>\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\nprint(f'Cool threshold: {cool_threshold}')\n<mask token>\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == '*' or ch == ':':\n continue\n sum_ch += ord(ch)\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\nprint(f'{len(valid_emojis)} emojis found in the text. The cool ones are:')\nprint(*cool_emoji, sep='\\n')\n",
"step-3": "<mask token>\npattern1 = '[:]{2}[A-Z][a-z]{2,}[:]{2}|[\\\\*]{2}[a-zA-Z]{3,}[\\\\*]{2}'\npattern2 = '([0-9]+)'\ndata = input()\nvalid_emojis = re.findall(pattern1, data)\nnumbers_ascii = re.findall(pattern2, data)\nnumbers_total = ''\nfor num in numbers_ascii:\n numbers_total += num\ncool_threshold = 1\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\nprint(f'Cool threshold: {cool_threshold}')\ncool_emoji = []\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == '*' or ch == ':':\n continue\n sum_ch += ord(ch)\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\nprint(f'{len(valid_emojis)} emojis found in the text. The cool ones are:')\nprint(*cool_emoji, sep='\\n')\n",
"step-4": "import re\npattern1 = '[:]{2}[A-Z][a-z]{2,}[:]{2}|[\\\\*]{2}[a-zA-Z]{3,}[\\\\*]{2}'\npattern2 = '([0-9]+)'\ndata = input()\nvalid_emojis = re.findall(pattern1, data)\nnumbers_ascii = re.findall(pattern2, data)\nnumbers_total = ''\nfor num in numbers_ascii:\n numbers_total += num\ncool_threshold = 1\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\nprint(f'Cool threshold: {cool_threshold}')\ncool_emoji = []\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == '*' or ch == ':':\n continue\n sum_ch += ord(ch)\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\nprint(f'{len(valid_emojis)} emojis found in the text. The cool ones are:')\nprint(*cool_emoji, sep='\\n')\n",
"step-5": "import re\n\npattern1 = r\"[:]{2}[A-Z][a-z]{2,}[:]{2}|[\\*]{2}[a-zA-Z]{3,}[\\*]{2}\"\npattern2 = r\"([0-9]+)\"\ndata = input()\nvalid_emojis = re.findall(pattern1, data)\nnumbers_ascii = re.findall(pattern2, data)\n\nnumbers_total = \"\"\n\nfor num in numbers_ascii:\n numbers_total += num\n\ncool_threshold = 1\n\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\n\n\nprint(f\"Cool threshold: {cool_threshold}\")\n\ncool_emoji = []\n\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == \"*\" or ch == \":\":\n continue\n sum_ch += ord(ch)\n\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\n\nprint(f\"{len(valid_emojis)} emojis found in the text. The cool ones are:\")\nprint(*cool_emoji,sep='\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pymysql
def testeSelect(db):
#创建查询游标
cur1 = db.cursor()
# 使用 execute() 方法执行 SQL 查询
cur1.execute("SELECT VERSION()")
# 使用 fetchone() 方法获取单条数据.
data = cur1.fetchone()
print(dir(data))
print ("cur1 : %s " % cur1)
print ("Database version : %s " % data)
def dropTable(db):
#创建查询游标
cur1 = db.cursor()
cur1.execute("drop table if exists python_demo")
print('dropTable',cur1)
def createTable(db):
#创建查询游标
cur1 = db.cursor()
sql = '''
CREATE TABLE IF NOT EXISTS python_demo (
MEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',
MEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',
MEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',
MEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',
COMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',
REG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',
REG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',
ENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',
REGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',
REG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',
JUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',
BUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',
COM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',
COM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',
PERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',
ZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',
CON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',
CON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',
CON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',
CON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',
CON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',
CON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',
CON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',
CON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',
CON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',
THERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',
BIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',
BIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',
BIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',
TAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',
TAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',
TAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',
ORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',
ORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',
ORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',
BANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',
BANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',
BANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',
BANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',
BANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',
BANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',
INVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',
INVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',
INVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',
APPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',
BUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',
SELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',
THIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',
MAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',
MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',
ERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',
REG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',
STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',
AUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',
AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',
AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',
AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',
MDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',
MDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',
MDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',
MDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',
MEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',
CHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',
ALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',
LANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',
CREATE_USER INT(20) NOT NULL COMMENT '创建者',
CREATE_DATE DATETIME NOT NULL COMMENT '创建时间',
UPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',
UPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',
DELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',
DELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',
BUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',
AUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',
AUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',
SELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',
SELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',
SELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',
IS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',
INVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',
INVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',
PRIMARY KEY (MEMBER_ID)
)
COMMENT='会员信息表'
COLLATE='utf8_general_ci'
ENGINE=InnoDB
'''
cur1.execute(sql)
print('createTabl',cur1)
def selectTable(db):
#创建查询游标
cur1 = db.cursor()
cur1.execute("select member_name,MEMBER_CODE,member_id from python_demo limit 10")
# 使用 fetchall() 接收全部的返回结果行
data = cur1.fetchall()
for index,item in enumerate(data):
print(index,sep=' ', end=' ')
for index2,item2 in enumerate(item):
print(item2,sep=' ', end=' ')
print("")
def insertTable(db):
	"""Insert one hard-coded demo member row into ``python_demo``.

	Args:
		db: an open PyMySQL connection.

	Returns:
		int: the number of rows inserted (normally 1).
	"""
	# Create a cursor for this statement.
	cur1 = db.cursor()
	cur1.execute("INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)")
	# PyMySQL runs with autocommit disabled by default (PEP 249), so the
	# insert must be committed explicitly or it is discarded when the
	# connection closes.
	db.commit()
	# rowcount is an attribute, not a method; the old commented-out
	# ``cur1.rowcount()`` call would have raised TypeError.
	return cur1.rowcount
# Open the database connection.
# NOTE(review): credentials are hard-coded for this local demo; move them to
# configuration or environment variables before any real use.
db = pymysql.connect(host='127.0.0.1', user='pu3147',
                     password='1qaz@WSX', database='demo',
                     port=3306, charset='UTF8')
try:
    # Rebuild the demo table from scratch, seed it, then run the queries.
    dropTable(db)
    createTable(db)
    # Insert the same demo row five times so the SELECT has data to show.
    for _ in range(5):
        insertTable(db)
    testeSelect(db)
    selectTable(db)
finally:
    # Always release the connection, even if any of the calls above raises.
    db.close()
|
normal
|
{
"blob_id": "75133dd924f8f3f028075c5d2109bb79ddc7fe87",
"index": 434,
"step-1": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT 
'联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL 
COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY 
(MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\n<mask token>\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT 
'联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL 
COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY 
(MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\ndef selectTable(db):\n cur1 = db.cursor()\n cur1.execute(\n 'select member_name,MEMBER_CODE,member_id from python_demo limit 10')\n data = cur1.fetchall()\n for index, item in enumerate(data):\n print(index, sep=' ', end=' ')\n for index2, item2 in enumerate(item):\n print(item2, sep=' ', end=' ')\n print('')\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 
'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT 
'联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL 
COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY 
(MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\ndef selectTable(db):\n cur1 = db.cursor()\n cur1.execute(\n 'select member_name,MEMBER_CODE,member_id from python_demo limit 10')\n data = cur1.fetchall()\n for index, item in enumerate(data):\n print(index, sep=' ', end=' ')\n for index2, item2 in enumerate(item):\n print(item2, sep=' ', end=' ')\n print('')\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 
'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\n<mask token>\ndropTable(db)\ncreateTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ntesteSelect(db)\nselectTable(db)\ndb.close()\n",
"step-4": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT 
'联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL 
COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY 
(MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\ndef selectTable(db):\n cur1 = db.cursor()\n cur1.execute(\n 'select member_name,MEMBER_CODE,member_id from python_demo limit 10')\n data = cur1.fetchall()\n for index, item in enumerate(data):\n print(index, sep=' ', end=' ')\n for index2, item2 in enumerate(item):\n print(item2, sep=' ', end=' ')\n print('')\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 
'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\ndb = pymysql.connect(host='127.0.0.1', user='pu3147', password='1qaz@WSX',\n database='demo', port=3306, charset='UTF8')\ndropTable(db)\ncreateTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ntesteSelect(db)\nselectTable(db)\ndb.close()\n",
"step-5": "import pymysql\n\n\n\ndef testeSelect(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\t# 使用 execute() 方法执行 SQL 查询 \n\tcur1.execute(\"SELECT VERSION()\")\n\t \n\t# 使用 fetchone() 方法获取单条数据.\n\tdata = cur1.fetchone()\n\tprint(dir(data))\n\tprint (\"cur1 : %s \" % cur1) \n\tprint (\"Database version : %s \" % data)\n\ndef dropTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tcur1.execute(\"drop table if exists python_demo\")\n\tprint('dropTable',cur1)\n\t \n\ndef createTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tsql = '''\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT 
'联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 
'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL 
DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY (MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t'''\n\n\tcur1.execute(sql)\n\t\n\tprint('createTabl',cur1)\n\ndef selectTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tcur1.execute(\"select member_name,MEMBER_CODE,member_id from python_demo limit 10\")\n\n\t# 使用 fetchall() 接收全部的返回结果行\n\tdata = cur1.fetchall()\n\tfor index,item in enumerate(data):\n\t\tprint(index,sep=' ', end=' ')\n\t\tfor index2,item2 in enumerate(item):\n\t\t\tprint(item2,sep=' ', end=' ')\n\t\tprint(\"\")\n\ndef insertTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tcur1.execute(\"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\")\n\n\t# 使用 fetchall() 接收全部的返回结果行\n\t#data = cur1.rowcount()\n\n\t#print('insertTable',data)\n\n# 打开数据库连接\ndb = pymysql.connect(host='127.0.0.1',user='pu3147',\n\tpassword='1qaz@WSX',database='demo',port=3306,charset='UTF8')\n\n\ndropTable(db)\ncreateTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ntesteSelect(db)\nselectTable(db)\n\n# 关闭数据库连接\ndb.close()\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# -*- coding:utf-8 -*-
'''
Created on 2013. 4. 30.
@author: Hwang-JinHwan
parsing the txt file which are generated by coping the pdf nova praxis rpg rule book
to create bootstrap document
'''
import re
import codecs
template = """
<head>
<style type="text/css">
body {{
padding-top: 60px;
padding-bottom: 40px;
}}
</style>
<link href="//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap-combined.min.css" rel="stylesheet">
</head>
<body>
<div class="navbar navbar-inverse navbar-fixed-top">
<div class="navbar-inner">
<div class="container">
<ul class="nav">
{nav_content}
</ul>
</div>
</div>
</div>
<div class='container'>
<div class="row">
{body_content}
</div>
</div>
<script src="//code.jquery.com/jquery-1.4.2.min.js"></script>
<script src="//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/js/bootstrap.min.js"></script>
</body>
"""
"""
<li class="dropdown">
<a data-toggle="dropdown" class="dropdown-toggle" href="#">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
<li>
<a href="#">2-level Dropdown <i class="icon-arrow-right"></i></a>
<ul class="dropdown-menu sub-menu">
<li><a href="#">Action</a></li>
<li><a href="#">Another action</a></li>
<li><a href="#">Something else here</a></li>
<li class="divider"></li>
<li class="nav-header">Nav header</li>
<li><a href="#">Separated link</a></li>
<li><a href="#">One more separated link</a></li>
</ul>
</li>
<li><a href="#">Another action</a></li>
<li><a href="#">Something else here</a></li>
<li class="divider"></li>
<li class="nav-header">Nav header</li>
<li><a href="#">Separated link</a></li>
<li><a href="#">One more separated link</a></li>
</ul>
</li>
"""
nav_template ="""
<li class="dropdown">
<a data-toggle="dropdown" class="dropdown-toggle" href="#">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
{drop_down_content}
<li>
<a href="#">Link</a>
</li>
<li class="active">
<a href="#">Link</a>
</li>
<li class="divider"></li>
<li>
<a href="#">Link</a>
</li>
</ul>
</li>
"""
indexed_title = []
def resolve_index_line(line):
def resolve(matchObj):
title = matchObj.group(1)
indexed_title.append(title.lower())
dot = matchObj.group(2)
page_num = matchObj.group(3)
return ur'<a href="#%s">%s</a>%s<a href="#p%s">%s</a><br>'%(title.lower(), title, dot, page_num.lower(), page_num)
return re.sub(ur'(\w.*?)\s*(\.{2,})\s*(\d+)', resolve, line, re.M | re.I)
curr_searching_title = 0
def resovle_title_line(line):
global curr_searching_title
if line.rstrip().lower() == indexed_title[curr_searching_title]:
curr_searching_title+=1
level = 3
if line.startswith("CHAPTER") :
level = 1
return '<h{level}><a name="{anchor}"></a>{text}</h{level}>\n'.format(anchor=line.rstrip().lower(), text=line.rstrip(), level=level)
else:
return line
"""if line.isupper() :
if re.match("^[A-Z]([A-Z0-9]|\s){3,}$", line, re.M):
titles.append(line.rstrip())
return '<h3><a name="%s"></a>%s</h3>\n' % (line.rstrip(), line.rstrip())"""
def resolve_normal_line(line):
sub_line = re.sub(ur'(pg. |page )(\d+)', ur'<a href="#p\2">\g<0></a>', line, re.M | re.I)
if line != sub_line:
print line,
print sub_line,
sub_line = "<p>%s</p>\n" % sub_line.rstrip()
return sub_line
def get_nav_content():
drop_down_content = []
for title in indexed_title:
drop_down_content.append('<li><a href="#%s">%s</a></li>\n' % (title, title))
return nav_template.format(drop_down_content="".join(drop_down_content))
if __name__ == '__main__':
# fr = open("resource/bar_test.txt", 'r')
fr = open("resource/nova praxis all.txt", 'r')
lines = fr.readlines()
toc_page_num = 5
prev_page_num = 2
body_content = []
buffered = []
for line in lines:
if(prev_page_num+1 <= toc_page_num):
ret = resolve_index_line(line)
if ret != line:
buffered.append(ret)
continue
elif(prev_page_num+1 >toc_page_num):
ret = resovle_title_line(line)
if ret != line:
buffered.append(ret)
continue
# data = fr.read()
matchObj = re.match(ur'^(\d+)$', line, re.M | re.I)
if matchObj:
page_num = int(matchObj.group(1))
if page_num < prev_page_num or page_num > prev_page_num + 2:
line = resolve_normal_line(line)
buffered.append(line)
continue
matched_tail = matchObj.group()
print "#MATCH:", matched_tail
buffered.append(matched_tail + "<br>\n")
buffered.insert(0, '<div class="well">')
buffered.append(r'</div>')
buffered.insert(0, '<a name="p%s"></a>' % page_num)
body_content.append("".join(buffered))
buffered = []
buffered.append(line[len(matched_tail):])
prev_page_num = page_num
else:
line = resolve_normal_line(line)
buffered.append(line)
fw = codecs.open("resource/nova_praxis.html", 'w', encoding='utf-8')
body_content.append("".join(buffered))
fw.write(template.format(body_content="".join(body_content), nav_content=get_nav_content()))
fr.close()
fw.close()
|
normal
|
{
"blob_id": "c036621c5f03d94987b4da004d063d11a7cc8424",
"index": 4418,
"step-1": "# -*- coding:utf-8 -*-\r\n'''\r\nCreated on 2013. 4. 30.\r\n\r\n@author: Hwang-JinHwan\r\n\r\nparsing the txt file which are generated by coping the pdf nova praxis rpg rule book \r\nto create bootstrap document\r\n'''\r\nimport re\r\nimport codecs\r\n\r\ntemplate = \"\"\"\r\n<head>\r\n <style type=\"text/css\">\r\n body {{\r\n padding-top: 60px;\r\n padding-bottom: 40px;\r\n }}\r\n </style>\r\n\r\n <link href=\"//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap-combined.min.css\" rel=\"stylesheet\">\r\n</head>\r\n<body>\r\n<div class=\"navbar navbar-inverse navbar-fixed-top\">\r\n <div class=\"navbar-inner\">\r\n <div class=\"container\">\r\n \r\n <ul class=\"nav\">\r\n {nav_content}\r\n </ul>\r\n \r\n </div>\r\n </div>\r\n </div> \r\n<div class='container'>\r\n<div class=\"row\">\r\n {body_content}\r\n</div>\r\n</div>\r\n<script src=\"//code.jquery.com/jquery-1.4.2.min.js\"></script> \r\n<script src=\"//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/js/bootstrap.min.js\"></script>\r\n</body>\r\n\"\"\"\r\n\r\n\"\"\"\r\n<li class=\"dropdown\">\r\n <a data-toggle=\"dropdown\" class=\"dropdown-toggle\" href=\"#\">Dropdown <b class=\"caret\"></b></a>\r\n <ul class=\"dropdown-menu\">\r\n <li>\r\n <a href=\"#\">2-level Dropdown <i class=\"icon-arrow-right\"></i></a>\r\n <ul class=\"dropdown-menu sub-menu\">\r\n <li><a href=\"#\">Action</a></li>\r\n <li><a href=\"#\">Another action</a></li>\r\n <li><a href=\"#\">Something else here</a></li>\r\n <li class=\"divider\"></li>\r\n <li class=\"nav-header\">Nav header</li>\r\n <li><a href=\"#\">Separated link</a></li>\r\n <li><a href=\"#\">One more separated link</a></li>\r\n </ul>\r\n </li>\r\n <li><a href=\"#\">Another action</a></li>\r\n <li><a href=\"#\">Something else here</a></li>\r\n <li class=\"divider\"></li>\r\n <li class=\"nav-header\">Nav header</li>\r\n <li><a href=\"#\">Separated link</a></li>\r\n <li><a href=\"#\">One more separated link</a></li>\r\n 
</ul>\r\n</li>\r\n\r\n\"\"\"\r\n\r\n\r\nnav_template =\"\"\"\r\n<li class=\"dropdown\">\r\n <a data-toggle=\"dropdown\" class=\"dropdown-toggle\" href=\"#\">Dropdown <b class=\"caret\"></b></a>\r\n <ul class=\"dropdown-menu\">\r\n {drop_down_content}\r\n <li>\r\n <a href=\"#\">Link</a>\r\n </li>\r\n <li class=\"active\">\r\n <a href=\"#\">Link</a>\r\n </li>\r\n <li class=\"divider\"></li>\r\n <li>\r\n <a href=\"#\">Link</a>\r\n </li>\r\n </ul>\r\n</li>\r\n\"\"\"\r\nindexed_title = []\r\n \r\n\r\ndef resolve_index_line(line):\r\n def resolve(matchObj):\r\n title = matchObj.group(1)\r\n indexed_title.append(title.lower())\r\n dot = matchObj.group(2)\r\n page_num = matchObj.group(3)\r\n return ur'<a href=\"#%s\">%s</a>%s<a href=\"#p%s\">%s</a><br>'%(title.lower(), title, dot, page_num.lower(), page_num)\r\n \r\n return re.sub(ur'(\\w.*?)\\s*(\\.{2,})\\s*(\\d+)', resolve, line, re.M | re.I)\r\n \r\n\r\ncurr_searching_title = 0\r\ndef resovle_title_line(line):\r\n global curr_searching_title\r\n if line.rstrip().lower() == indexed_title[curr_searching_title]:\r\n curr_searching_title+=1\r\n level = 3\r\n if line.startswith(\"CHAPTER\") :\r\n level = 1\r\n return '<h{level}><a name=\"{anchor}\"></a>{text}</h{level}>\\n'.format(anchor=line.rstrip().lower(), text=line.rstrip(), level=level)\r\n else:\r\n return line\r\n \r\n \"\"\"if line.isupper() : \r\n if re.match(\"^[A-Z]([A-Z0-9]|\\s){3,}$\", line, re.M):\r\n titles.append(line.rstrip())\r\n return '<h3><a name=\"%s\"></a>%s</h3>\\n' % (line.rstrip(), line.rstrip())\"\"\"\r\n\r\ndef resolve_normal_line(line):\r\n sub_line = re.sub(ur'(pg. 
|page )(\\d+)', ur'<a href=\"#p\\2\">\\g<0></a>', line, re.M | re.I)\r\n if line != sub_line:\r\n print line,\r\n print sub_line,\r\n sub_line = \"<p>%s</p>\\n\" % sub_line.rstrip()\r\n return sub_line\r\n \r\ndef get_nav_content(): \r\n drop_down_content = []\r\n for title in indexed_title:\r\n drop_down_content.append('<li><a href=\"#%s\">%s</a></li>\\n' % (title, title))\r\n return nav_template.format(drop_down_content=\"\".join(drop_down_content))\r\n\r\nif __name__ == '__main__':\r\n # fr = open(\"resource/bar_test.txt\", 'r')\r\n fr = open(\"resource/nova praxis all.txt\", 'r')\r\n \r\n lines = fr.readlines()\r\n \r\n toc_page_num = 5\r\n \r\n prev_page_num = 2\r\n \r\n body_content = []\r\n buffered = []\r\n for line in lines:\r\n if(prev_page_num+1 <= toc_page_num):\r\n ret = resolve_index_line(line)\r\n if ret != line:\r\n buffered.append(ret)\r\n continue\r\n elif(prev_page_num+1 >toc_page_num):\r\n ret = resovle_title_line(line)\r\n if ret != line:\r\n buffered.append(ret)\r\n continue\r\n \r\n # data = fr.read()\r\n matchObj = re.match(ur'^(\\d+)$', line, re.M | re.I)\r\n if matchObj:\r\n page_num = int(matchObj.group(1))\r\n if page_num < prev_page_num or page_num > prev_page_num + 2:\r\n line = resolve_normal_line(line)\r\n buffered.append(line)\r\n continue\r\n matched_tail = matchObj.group()\r\n print \"#MATCH:\", matched_tail\r\n buffered.append(matched_tail + \"<br>\\n\")\r\n buffered.insert(0, '<div class=\"well\">')\r\n buffered.append(r'</div>')\r\n buffered.insert(0, '<a name=\"p%s\"></a>' % page_num)\r\n \r\n body_content.append(\"\".join(buffered))\r\n \r\n buffered = []\r\n buffered.append(line[len(matched_tail):])\r\n prev_page_num = page_num\r\n else:\r\n line = resolve_normal_line(line)\r\n buffered.append(line)\r\n \r\n fw = codecs.open(\"resource/nova_praxis.html\", 'w', encoding='utf-8')\r\n body_content.append(\"\".join(buffered))\r\n fw.write(template.format(body_content=\"\".join(body_content), nav_content=get_nav_content()))\r\n 
\r\n \r\n fr.close() \r\n fw.close()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from bs4 import BeautifulSoup
import urllib2
import datetime
import re
import csv
import sys
import time
import bb_load as bb_l
import pandas as pd
import requests
#Scrape the web for new buybacks
def scrape_buybacks():
'''
(NoneType) -> scraped_database.csv, database=open('scrape_database.csv', 'r')
Version 3.0, MSP @ 11:00 04.06.16
'''
#Define some of the variables used
start_time = time.time()
stock_list = []
date_list = []
bb_list = []
not_added = int(0)
full_switch = 'y'
#Load reference database by external function
try:
existing_database = read_existing_scrapefile()
print ('Comparing existing database to new buybacks.')
first = existing_database[0]
first_date = first[0:first.find(',')]
full_switch = raw_input('Do a full search beyond the most recent date '\
+'in database? y/n: ')
except (IOError, Warning):
print 'Warning: No prior database available.', '\n' \
'No reference check will be conducted; proceed with a new database file.', '\n'
existing_database = []
first_date = 0
#Run a for loop to scrape all 5 pages of data
for numb in ('1', '2', '3', '4', '5'):
url = ("http://www.rttnews.com/CorpInfo/StockBuybacks.aspx?PageNum=" + numb)
try: #Scrape the page
soup = BeautifulSoup(requests.get(url).content, "html.parser")
except (Warning, IOError): #Inform of any problems
print 'Failed to scrape page number ' + numb + '.' + '\n' \
'The remote host could have terminated the connection.' + '\n' \
'Scraping terminated; try to run the program again.'
sys.exit(0)
end_search = False
#Scrape the relevant info for all announcements in ODD rows
for item in soup.select(".ecoCalContent"):
count = 0
#Scrape the relevant info for an individual announcement
for numb in ["1","2","3","4","5","6"]:
string = ".tblContent" + numb
count = count + 1
start = int(str(item.select(string)).find('">') + 2)
stop = int(str(item.select(string)).find('</'))
extract = str(item.select(string))[start:stop]
if count == 1:
date = extract
y = int(date[date.rfind("/")+1:len(date)])+2000
try:
d = int(date[date.find("/")+1:len(date)-date.find("/")-2])
except ValueError:
d = 1
m = int(date[0:date.find("/")])
date = datetime.datetime(y,m,d).strftime("%Y-%m-%d")
if count == 2:
ticker = extract[extract.find(">")+1:len(extract)]
if ticker.find(",") > 0:
while ticker.count(",") > 1: # strip until unly one comma left
ticker = ticker[ticker.find(",")+1:len(ticker)] # Strip before first comma
ticker = ticker[0:ticker.find(",")] # Strip after second comma
if ticker.find(".") > 0:
ticker = ticker[0:ticker.find(".")]
ticker = filter(str.isupper, ticker)
if count == 4:
buyback = extract
unit = buyback.join(re.findall("[a-zA-Z]+", buyback))
val = re.findall(r"[-+]?\d*\.\d+|\d+", buyback)
val = float(val[0])
if unit == "":
val = val / 1000000
elif unit == "K":
val = val / 1000
elif unit == "Bln":
val = val * 1000
date_list.append(date)
stock_list.append(ticker)
bb_list.append(val)
#Build the aggregated list and removing buybacks
#already in the existing buyback database
teststr = str(date)+','+str(ticker)+','+str(val)
if teststr in existing_database:
date_list.pop()
stock_list.pop()
bb_list.pop()
not_added = not_added + 1
#Scrape the relevant info for all announcements in EVEN rows
for item in soup.select(".ecoCalAltContent"):
count = 0
#Scrape the relevant info for an individual announcement
for numb in ["1","2","3","4","5","6"]:
string = ".tblContent" + numb
count = count + 1
start = int(str(item.select(string)).find('">') + 2)
stop = int(str(item.select(string)).find('</'))
extract = str(item.select(string))[start:stop]
if count == 1:
date = extract
y = int(date[date.rfind("/")+1:len(date)])+2000
try:
d = int(date[date.find("/")+1:len(date)-date.find("/")-2])
except ValueError:
d = 1
m = int(date[0:date.find("/")])
date = datetime.datetime(y,m,d).strftime("%Y-%m-%d")
if count == 2:
ticker = extract[extract.find(">")+1:len(extract)]
if ticker.find(",") > 0:
while ticker.count(",") > 1: # strip until unly one comma left
ticker = ticker[ticker.find(",")+1:len(ticker)] # Strip before first comma
ticker = ticker[0:ticker.find(",")] # Strip after second comma
if ticker.find(".") > 0:
ticker = ticker[0:ticker.find(".")]
ticker = filter(str.isupper, ticker)
if count == 4:
buyback = extract
unit = buyback.join(re.findall("[a-zA-Z]+", buyback))
val = re.findall(r"[-+]?\d*\.\d+|\d+", buyback)
val = float(val[0])
if unit == "":
val = val / 1000000
elif unit == "K":
val = val / 1000
elif unit == "Bln":
val = val * 1000
date_list.append(date)
stock_list.append(ticker)
bb_list.append(val)
#Build the aggregated list and removing buybacks
#already in the existing buyback database
teststr = str(date)+','+str(ticker)+','+str(val)
if teststr in existing_database:
date_list.pop()
stock_list.pop()
bb_list.pop()
not_added = not_added + 1
#Make a master list
master = [date_list, stock_list, bb_list]
with open('scrape_database.csv', 'ab') as scrapefile:
file_writer = csv.writer(scrapefile)
for i in range(len(master[0])):
file_writer.writerow([x[i] for x in master])
sort_existing_scrapefile()
print '\n', '---------------------------------------------------------'
print 'MODULE: NEW SHARE BUYBACKS FROM STOCKMAVEN.COM.'
print 'Output: ' + str(len(date_list)) + \
' buyback(s) added to scrape_database.csv.'
print ' ' + str(not_added) + ' buyback(s) scraped but not added to database'
print 'Run-time:', "%.2f" %(time.time() - start_time), 'sec'
print '---------------------------------------------------------' + '\n'
#Read the existing scrapefile into a list for comparison
def read_existing_scrapefile():
'''
(file open for reading) -> list of str
Read and return each row in the scrapefile
comprising date, ticker, and amount of a buyback and return
a list of strings containing this information
Precondition: the file scrapefile.csv must be available in
the root directory
'''
scrape_database = open('scrape_database.csv','r')
line = scrape_database.readline().strip('\n')
existing_database = []
while line !='':
existing_database.append(str(line))
line = scrape_database.readline().strip('\n')
scrape_database.close()
return existing_database
# Sort the existing scrapefile by descending dates
def sort_existing_scrapefile():
'''
Version update: MSP @ 00:12 29.04.14
( ) -> ( )
Sort the buyback database (scrape_database.csv) by descending dates.
'''
c = bb_l.load_buyback_df(-1,-1).T.sort('Date',ascending=False)
d = c.index.tolist()
c['Ticker'] = d
e = c['Date'].tolist()
f = c[['Ticker','Amount']]
f.index = e
f.to_csv('scrape_database.csv', header=False)
|
normal
|
{
"blob_id": "276bcb2e90c30f87c618106e5e862f00d082da34",
"index": 9224,
"step-1": "\r\nfrom bs4 import BeautifulSoup\r\nimport urllib2\r\nimport datetime\r\nimport re\r\nimport csv\r\nimport sys\r\nimport time\r\nimport bb_load as bb_l\r\nimport pandas as pd\r\nimport requests\r\n\r\n#Scrape the web for new buybacks\r\ndef scrape_buybacks():\r\n\r\n '''\r\n\r\n (NoneType) -> scraped_database.csv, database=open('scrape_database.csv', 'r')\r\n\r\n\r\n Version 3.0, MSP @ 11:00 04.06.16\r\n \r\n '''\r\n\r\n\r\n #Define some of the variables used\r\n start_time = time.time()\r\n stock_list = []\r\n date_list = []\r\n bb_list = []\r\n not_added = int(0)\r\n full_switch = 'y'\r\n\r\n #Load reference database by external function\r\n try:\r\n existing_database = read_existing_scrapefile()\r\n print ('Comparing existing database to new buybacks.')\r\n first = existing_database[0]\r\n first_date = first[0:first.find(',')]\r\n full_switch = raw_input('Do a full search beyond the most recent date '\\\r\n +'in database? y/n: ')\r\n except (IOError, Warning):\r\n print 'Warning: No prior database available.', '\\n' \\\r\n 'No reference check will be conducted; proceed with a new database file.', '\\n'\r\n existing_database = []\r\n first_date = 0\r\n \r\n \r\n #Run a for loop to scrape all 5 pages of data\r\n for numb in ('1', '2', '3', '4', '5'):\r\n url = (\"http://www.rttnews.com/CorpInfo/StockBuybacks.aspx?PageNum=\" + numb)\r\n\r\n try: #Scrape the page\r\n soup = BeautifulSoup(requests.get(url).content, \"html.parser\")\r\n\r\n except (Warning, IOError): #Inform of any problems\r\n print 'Failed to scrape page number ' + numb + '.' + '\\n' \\\r\n 'The remote host could have terminated the connection.' 
+ '\\n' \\\r\n 'Scraping terminated; try to run the program again.'\r\n sys.exit(0)\r\n\r\n \r\n end_search = False\r\n\r\n #Scrape the relevant info for all announcements in ODD rows\r\n for item in soup.select(\".ecoCalContent\"):\r\n count = 0\r\n \r\n #Scrape the relevant info for an individual announcement\r\n for numb in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]:\r\n string = \".tblContent\" + numb\r\n count = count + 1\r\n \r\n start = int(str(item.select(string)).find('\">') + 2)\r\n stop = int(str(item.select(string)).find('</'))\r\n \r\n extract = str(item.select(string))[start:stop]\r\n\r\n if count == 1:\r\n date = extract\r\n y = int(date[date.rfind(\"/\")+1:len(date)])+2000\r\n try:\r\n d = int(date[date.find(\"/\")+1:len(date)-date.find(\"/\")-2])\r\n except ValueError:\r\n d = 1\r\n m = int(date[0:date.find(\"/\")])\r\n date = datetime.datetime(y,m,d).strftime(\"%Y-%m-%d\")\r\n \r\n if count == 2:\r\n ticker = extract[extract.find(\">\")+1:len(extract)]\r\n\r\n if ticker.find(\",\") > 0: \r\n while ticker.count(\",\") > 1: # strip until unly one comma left\r\n ticker = ticker[ticker.find(\",\")+1:len(ticker)] # Strip before first comma\r\n ticker = ticker[0:ticker.find(\",\")] # Strip after second comma\r\n if ticker.find(\".\") > 0: \r\n ticker = ticker[0:ticker.find(\".\")]\r\n\r\n ticker = filter(str.isupper, ticker)\r\n \r\n if count == 4:\r\n buyback = extract\r\n unit = buyback.join(re.findall(\"[a-zA-Z]+\", buyback))\r\n val = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", buyback)\r\n val = float(val[0])\r\n\r\n if unit == \"\":\r\n val = val / 1000000\r\n elif unit == \"K\":\r\n val = val / 1000\r\n elif unit == \"Bln\":\r\n val = val * 1000\r\n \r\n date_list.append(date)\r\n stock_list.append(ticker)\r\n bb_list.append(val)\r\n\r\n #Build the aggregated list and removing buybacks\r\n #already in the existing buyback database\r\n\r\n teststr = str(date)+','+str(ticker)+','+str(val)\r\n \r\n if teststr in existing_database:\r\n date_list.pop()\r\n 
stock_list.pop()\r\n bb_list.pop()\r\n not_added = not_added + 1\r\n\r\n #Scrape the relevant info for all announcements in EVEN rows\r\n for item in soup.select(\".ecoCalAltContent\"):\r\n count = 0\r\n \r\n #Scrape the relevant info for an individual announcement\r\n for numb in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]:\r\n string = \".tblContent\" + numb\r\n count = count + 1\r\n \r\n start = int(str(item.select(string)).find('\">') + 2)\r\n stop = int(str(item.select(string)).find('</'))\r\n \r\n extract = str(item.select(string))[start:stop]\r\n\r\n if count == 1:\r\n date = extract\r\n y = int(date[date.rfind(\"/\")+1:len(date)])+2000\r\n try:\r\n d = int(date[date.find(\"/\")+1:len(date)-date.find(\"/\")-2])\r\n except ValueError:\r\n d = 1\r\n m = int(date[0:date.find(\"/\")])\r\n date = datetime.datetime(y,m,d).strftime(\"%Y-%m-%d\")\r\n \r\n if count == 2:\r\n ticker = extract[extract.find(\">\")+1:len(extract)]\r\n\r\n if ticker.find(\",\") > 0: \r\n while ticker.count(\",\") > 1: # strip until unly one comma left\r\n ticker = ticker[ticker.find(\",\")+1:len(ticker)] # Strip before first comma\r\n ticker = ticker[0:ticker.find(\",\")] # Strip after second comma\r\n if ticker.find(\".\") > 0: \r\n ticker = ticker[0:ticker.find(\".\")]\r\n\r\n ticker = filter(str.isupper, ticker)\r\n\r\n if count == 4:\r\n buyback = extract\r\n unit = buyback.join(re.findall(\"[a-zA-Z]+\", buyback))\r\n val = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", buyback)\r\n val = float(val[0])\r\n\r\n if unit == \"\":\r\n val = val / 1000000\r\n elif unit == \"K\":\r\n val = val / 1000\r\n elif unit == \"Bln\":\r\n val = val * 1000\r\n \r\n date_list.append(date)\r\n stock_list.append(ticker)\r\n bb_list.append(val)\r\n\r\n #Build the aggregated list and removing buybacks\r\n #already in the existing buyback database\r\n\r\n teststr = str(date)+','+str(ticker)+','+str(val)\r\n \r\n if teststr in existing_database:\r\n date_list.pop()\r\n stock_list.pop()\r\n bb_list.pop()\r\n not_added = 
not_added + 1\r\n\r\n #Make a master list \r\n master = [date_list, stock_list, bb_list]\r\n\r\n with open('scrape_database.csv', 'ab') as scrapefile:\r\n file_writer = csv.writer(scrapefile)\r\n\r\n for i in range(len(master[0])):\r\n file_writer.writerow([x[i] for x in master])\r\n\r\n sort_existing_scrapefile()\r\n \r\n print '\\n', '---------------------------------------------------------'\r\n print 'MODULE: NEW SHARE BUYBACKS FROM STOCKMAVEN.COM.'\r\n print 'Output: ' + str(len(date_list)) + \\\r\n ' buyback(s) added to scrape_database.csv.'\r\n print ' ' + str(not_added) + ' buyback(s) scraped but not added to database'\r\n print 'Run-time:', \"%.2f\" %(time.time() - start_time), 'sec'\r\n print '---------------------------------------------------------' + '\\n'\r\n\r\n\r\n#Read the existing scrapefile into a list for comparison\r\ndef read_existing_scrapefile():\r\n\r\n '''\r\n (file open for reading) -> list of str\r\n\r\n Read and return each row in the scrapefile\r\n comprising date, ticker, and amount of a buyback and return\r\n a list of strings containing this information\r\n\r\n Precondition: the file scrapefile.csv must be available in\r\n the root directory\r\n \r\n '''\r\n\r\n scrape_database = open('scrape_database.csv','r')\r\n\r\n line = scrape_database.readline().strip('\\n')\r\n \r\n existing_database = []\r\n \r\n while line !='':\r\n existing_database.append(str(line))\r\n line = scrape_database.readline().strip('\\n')\r\n\r\n scrape_database.close()\r\n \r\n return existing_database \r\n\r\n# Sort the existing scrapefile by descending dates\r\ndef sort_existing_scrapefile():\r\n '''\r\n\r\n Version update: MSP @ 00:12 29.04.14\r\n \r\n ( ) -> ( )\r\n\r\n Sort the buyback database (scrape_database.csv) by descending dates.\r\n \r\n '''\r\n\r\n c = bb_l.load_buyback_df(-1,-1).T.sort('Date',ascending=False)\r\n d = c.index.tolist()\r\n c['Ticker'] = d\r\n e = c['Date'].tolist()\r\n f = c[['Ticker','Amount']]\r\n f.index = e\r\n 
f.to_csv('scrape_database.csv', header=False)\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from youtube_transcript_api import YouTubeTranscriptApi

# Look up all transcripts for the video, then fetch the English one.
available = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')
fetched = available.find_transcript(['en']).fetch()

# Write one caption segment per line.
with open("transcript.txt", 'w') as f:
    for segment in fetched:
        f.write(segment['text'] + '\n')
|
normal
|
{
"blob_id": "c2d6e4286e1b9d6dc852bde994da60d353e03e5c",
"index": 8031,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('transcript.txt', 'w') as f:\n for line in transcript:\n f.write(line['text'] + '\\n')\n",
"step-3": "<mask token>\ntranscript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')\ntranscript = transcript_list.find_transcript(['en'])\ntranscript = transcript.fetch()\nwith open('transcript.txt', 'w') as f:\n for line in transcript:\n f.write(line['text'] + '\\n')\n",
"step-4": "from youtube_transcript_api import YouTubeTranscriptApi\ntranscript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')\ntranscript = transcript_list.find_transcript(['en'])\ntranscript = transcript.fetch()\nwith open('transcript.txt', 'w') as f:\n for line in transcript:\n f.write(line['text'] + '\\n')\n",
"step-5": "from youtube_transcript_api import YouTubeTranscriptApi\n\ntranscript_list = YouTubeTranscriptApi.list_transcripts('i8pOulVUz0A')\ntranscript = transcript_list.find_transcript(['en'])\ntranscript = transcript.fetch()\n\nwith open(\"transcript.txt\", 'w') as f:\n for line in transcript:\n f.write(line['text']+ '\\n')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(any(i.isalnum() for i in s))
print(any(i.isalpha() for i in s))
print(any(i.isdigit() for i in s))
print(any(i.islower() for i in s))
print(any(i.isupper() for i in s))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = raw_input()
print(any(i.isalnum() for i in s))
print(any(i.isalpha() for i in s))
print(any(i.isdigit() for i in s))
print(any(i.islower() for i in s))
print(any(i.isupper() for i in s))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""You are given a string .
Your task is to find out if the string contains:
alphanumeric characters, alphabetical characters, digits,
lowercase and uppercase characters."""
s = raw_input()
print(any(i.isalnum()for i in s))
print(any(i.isalpha()for i in s))
print(any(i.isdigit()for i in s))
print(any(i.islower()for i in s))
print(any(i.isupper()for i in s))
""" any() in python returns
True is any of element of the iterable(list,tuple,dict,set etc) are true
to the condition else returns False."""
|
flexible
|
{
"blob_id": "f29fa3d796d9d403d6bf62cb28f5009501c55545",
"index": 3650,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(any(i.isalnum() for i in s))\nprint(any(i.isalpha() for i in s))\nprint(any(i.isdigit() for i in s))\nprint(any(i.islower() for i in s))\nprint(any(i.isupper() for i in s))\n<mask token>\n",
"step-3": "<mask token>\ns = raw_input()\nprint(any(i.isalnum() for i in s))\nprint(any(i.isalpha() for i in s))\nprint(any(i.isdigit() for i in s))\nprint(any(i.islower() for i in s))\nprint(any(i.isupper() for i in s))\n<mask token>\n",
"step-4": "\"\"\"You are given a string .\r\nYour task is to find out if the string contains:\r\nalphanumeric characters, alphabetical characters, digits,\r\nlowercase and uppercase characters.\"\"\"\r\n\r\ns = raw_input()\r\nprint(any(i.isalnum()for i in s))\r\nprint(any(i.isalpha()for i in s))\r\nprint(any(i.isdigit()for i in s))\r\nprint(any(i.islower()for i in s))\r\nprint(any(i.isupper()for i in s))\r\n\r\n\r\n\"\"\" any() in python returns\r\nTrue is any of element of the iterable(list,tuple,dict,set etc) are true\r\nto the condition else returns False.\"\"\"\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import cv2
import numpy as np
kernel = np.ones((3, 3), np.uint8)
def mask(image):
    """Return a binary skin mask for the 300x300 ROI of *image*.

    Draws a green rectangle on the caller's frame marking the region of
    interest, thresholds that region in HSV space for skin tones, then
    cleans the result with morphological operations and a Gaussian blur.

    :param image: BGR frame (numpy array), assumed at least 350x350 px
        -- TODO confirm caller guarantees this.
    :returns: Single-channel uint8 mask covering the 300x300 ROI.
    """
    # Region of interest; the rectangle is drawn directly on the
    # caller's frame as a visual guide (side effect on *image*).
    roi = image[50:350, 50:350]
    cv2.rectangle(image, (50, 50), (350, 350), (0, 255, 0), 0)
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Skin color range in HSV (presumably tuned empirically).
    lower_skin = np.array([0, 20, 70], dtype=np.uint8)
    upper_skin = np.array([20, 255, 255], dtype=np.uint8)
    skin_mask = cv2.inRange(hsv, lower_skin, upper_skin)
    # Dilate then erode to fill dark holes inside the detected region.
    skin_mask = cv2.dilate(skin_mask, kernel, iterations=4)
    skin_mask = cv2.erode(skin_mask, kernel, iterations=9)
    # Smooth jagged mask edges.
    skin_mask = cv2.GaussianBlur(skin_mask, (5, 5), 100)
    # NOTE(review): the original flipped *image* into a local variable
    # that was never used or returned; that dead statement was removed.
    return skin_mask
|
normal
|
{
"blob_id": "2286aa1581ca7d6282b35847505a904980da275e",
"index": 8659,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mask(image):\n green_frame = image[50:350, 50:350]\n cv2.rectangle(image, (50, 50), (350, 350), (0, 255, 0), 0)\n hsv = cv2.cvtColor(green_frame, cv2.COLOR_BGR2HSV)\n lower_skin = np.array([0, 20, 70], dtype=np.uint8)\n upper_skin = np.array([20, 255, 255], dtype=np.uint8)\n mask = cv2.inRange(hsv, lower_skin, upper_skin)\n mask = cv2.dilate(mask, kernel, iterations=4)\n mask = cv2.erode(mask, kernel, iterations=9)\n mask = cv2.GaussianBlur(mask, (5, 5), 100)\n image = cv2.flip(image, 1)\n return mask\n",
"step-3": "<mask token>\nkernel = np.ones((3, 3), np.uint8)\n\n\ndef mask(image):\n green_frame = image[50:350, 50:350]\n cv2.rectangle(image, (50, 50), (350, 350), (0, 255, 0), 0)\n hsv = cv2.cvtColor(green_frame, cv2.COLOR_BGR2HSV)\n lower_skin = np.array([0, 20, 70], dtype=np.uint8)\n upper_skin = np.array([20, 255, 255], dtype=np.uint8)\n mask = cv2.inRange(hsv, lower_skin, upper_skin)\n mask = cv2.dilate(mask, kernel, iterations=4)\n mask = cv2.erode(mask, kernel, iterations=9)\n mask = cv2.GaussianBlur(mask, (5, 5), 100)\n image = cv2.flip(image, 1)\n return mask\n",
"step-4": "import cv2\nimport numpy as np\nkernel = np.ones((3, 3), np.uint8)\n\n\ndef mask(image):\n green_frame = image[50:350, 50:350]\n cv2.rectangle(image, (50, 50), (350, 350), (0, 255, 0), 0)\n hsv = cv2.cvtColor(green_frame, cv2.COLOR_BGR2HSV)\n lower_skin = np.array([0, 20, 70], dtype=np.uint8)\n upper_skin = np.array([20, 255, 255], dtype=np.uint8)\n mask = cv2.inRange(hsv, lower_skin, upper_skin)\n mask = cv2.dilate(mask, kernel, iterations=4)\n mask = cv2.erode(mask, kernel, iterations=9)\n mask = cv2.GaussianBlur(mask, (5, 5), 100)\n image = cv2.flip(image, 1)\n return mask\n",
"step-5": "import cv2\nimport numpy as np\nkernel = np.ones((3, 3), np.uint8)\ndef mask(image):\n # define region of interest\n green_frame = image[50:350, 50:350]\n cv2.rectangle(image, (50, 50), (350, 350), (0, 255, 0), 0)\n hsv = cv2.cvtColor(green_frame, cv2.COLOR_BGR2HSV)\n # define range of skin color in HSV\n lower_skin = np.array([0, 20, 70], dtype=np.uint8)\n upper_skin = np.array([20, 255, 255], dtype=np.uint8)\n # extract skin colur imagw\n mask = cv2.inRange(hsv, lower_skin, upper_skin)\n # extrapolate the hand to fill dark spots within\n mask = cv2.dilate(mask, kernel, iterations=4)\n mask = cv2.erode(mask, kernel, iterations=9)\n # blur the image\n mask = cv2.GaussianBlur(mask, (5, 5), 100)\n image = cv2.flip(image, 1)\n return mask\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class CtrlServer(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems['ctrl'] = self
systems['driver'] = self.driver
self.logger.debug('Systems: {}'.format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info('Control server: {}'.format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug('Sending: {}'.format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = 'Not a JSON message!'
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info('Exiting control server. Bye!')
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug('Received: {}'.format(msg))
try:
msg_type = msg['type']
except KeyError as e:
return msgs.error(e)
if msg_type == 'ping_req':
reply = msgs.ping_reply()
elif msg_type == 'list_req':
reply = self.list_callables()
elif msg_type == 'call_req':
try:
obj_name = msg['obj_name']
method = msg['method']
params = msg['params']
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == 'exit_req':
self.logger.info('Received message to die. Bye!')
reply = msgs.exit_reply()
self.logger.debug('Sending: {}'.format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = 'Unrecognized message: {}'.format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug('List of callable API objects requested')
callables = {}
for name, obj in self.systems.items():
methods = []
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
<|reserved_special_token_0|>
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception('Exception test')
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = 'Spawned pub server'
self.logger.info(msg)
return msg
else:
err_msg = 'PubServer is already running'
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems['driver'].move(0, 0)
def clean_up(self):
"""Tear down ZMQ socket."""
self.stop_full()
self.ctrl_sock.close()
self.context.term()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CtrlServer(object):
<|reserved_special_token_0|>
def __init__(self, testing=None, config_file='bot/config.yaml'):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
signal.signal(signal.SIGINT, self.signal_handler)
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
if testing is True or testing == 'True':
self.logger.info('CtrlServer running in test mode')
lib.set_testing(True)
elif testing is None:
self.logger.info('Defaulting to config testing flag: {}'.format
(self.config['testing']))
lib.set_testing(self.config['testing'])
else:
self.logger.info('CtrlServer running in non-test mode')
lib.set_testing(False)
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol
=self.config['server_protocol'], host=self.config[
'server_bind_host'], port=self.config['ctrl_server_port'])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error('ZMQ error. Is a server already running?')
self.logger.warning('May be connected to an old server instance.')
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info('Control server initialized')
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')
self.clean_up()
self.logger.info('Cleaned up bot, exiting...')
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems['ctrl'] = self
systems['driver'] = self.driver
self.logger.debug('Systems: {}'.format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info('Control server: {}'.format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug('Sending: {}'.format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = 'Not a JSON message!'
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info('Exiting control server. Bye!')
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug('Received: {}'.format(msg))
try:
msg_type = msg['type']
except KeyError as e:
return msgs.error(e)
if msg_type == 'ping_req':
reply = msgs.ping_reply()
elif msg_type == 'list_req':
reply = self.list_callables()
elif msg_type == 'call_req':
try:
obj_name = msg['obj_name']
method = msg['method']
params = msg['params']
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == 'exit_req':
self.logger.info('Received message to die. Bye!')
reply = msgs.exit_reply()
self.logger.debug('Sending: {}'.format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = 'Unrecognized message: {}'.format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug('List of callable API objects requested')
callables = {}
for name, obj in self.systems.items():
methods = []
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug('API call: {}.{}({})'.format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
call_return = getattr(obj, method)(**params)
msg = 'Called {}.{}'.format(name, method)
self.logger.debug(msg + ',returned:{}'.format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
err_msg = 'Invalid params for {}.{}'.format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception('Exception test')
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = 'Spawned pub server'
self.logger.info(msg)
return msg
else:
err_msg = 'PubServer is already running'
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems['driver'].move(0, 0)
def clean_up(self):
"""Tear down ZMQ socket."""
self.stop_full()
self.ctrl_sock.close()
self.context.term()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_api_method(obj, name):
    """Check whether *obj* exposes a method *name* flagged for API export.

    A method counts as API-exported when the attribute exists on the
    object, is a bound method, and carries the ``__api_call`` attribute
    (set by the ``@lib.api_call`` decorator).

    :param obj: Object to inspect for the named method.
    :param name: Attribute name to look up on the object.
    :returns: True if the attribute exists, is a method, and is exported.
    """
    try:
        candidate = getattr(obj, name)
    except AttributeError:
        # Attribute missing entirely -> definitely not an API method.
        return False
    else:
        return ismethod(candidate) and hasattr(candidate, '__api_call')
class CtrlServer(object):
    """Exports bot control via ZMQ.
    Most functionally exported by CtrlServer is in the form of methods
    exposed by the API. @lib.api_call decorators can be added to bot
    systems, which tags them for export. They can then be called
    remotely via CtrlClient, which is typically owned by an interface
    like the CLI, which typically accepts commands from an agent like
    a human.
    Some control is exported directly by CtrlServer, not through the
    API. For example, CtrlServer responds directly to ping messages,
    list messages (which give the objects/methods exposed by the API),
    and exit messages.
    CtrlServer is the primary owner of bot resources, which we call
    systems. For example, it's CtrlServer that instantiates gunner
    and follower. Through those two, CtrlServer owns the gun, the
    IR hub, the turret and basically every other bot system.
    The messages that CtrlServer accepts and responds with are fully
    specified in lib.messages. Make any changes to messages there.
    CtrlServer can be instructed (via the API) to spawn a new thread
    for a PubServer. When that happens, CtrlServer passes its systems
    to PubServer, which can read their state and publish it over a
    ZMQ PUB socket.
    """
    def __init__(self, testing=None, config_file='bot/config.yaml'):
        """Build ZMQ REP socket and instantiate bot systems.
        :param testing: True if running on simulated HW, False if on bot.
        :type testing: boolean
        :param config_file: Name of file to read configuration from.
        :type config_file: string
        """
        # Install SIGINT handler first so Ctrl+C during setup still
        # triggers an orderly clean_up().
        signal.signal(signal.SIGINT, self.signal_handler)
        self.config = lib.get_config(config_file)
        self.logger = lib.get_logger()
        # Testing-flag precedence: explicit True (or the string 'True',
        # as passed from sys.argv) > config-file default > explicit off.
        if testing is True or testing == 'True':
            self.logger.info('CtrlServer running in test mode')
            lib.set_testing(True)
        elif testing is None:
            self.logger.info('Defaulting to config testing flag: {}'.format
                (self.config['testing']))
            lib.set_testing(self.config['testing'])
        else:
            self.logger.info('CtrlServer running in non-test mode')
            lib.set_testing(False)
        # REP socket: strict request/reply pairing with the CtrlClient.
        self.context = zmq.Context()
        self.ctrl_sock = self.context.socket(zmq.REP)
        self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol
            =self.config['server_protocol'], host=self.config[
            'server_bind_host'], port=self.config['ctrl_server_port'])
        try:
            self.ctrl_sock.bind(self.server_bind_addr)
        except zmq.ZMQError:
            # Typically means the port is already bound by another server.
            self.logger.error('ZMQ error. Is a server already running?')
            self.logger.warning('May be connected to an old server instance.')
            sys.exit(1)
        self.systems = self.assign_subsystems()
        self.logger.info('Control server initialized')
        # PubServer thread is spawned lazily via spawn_pub_server().
        self.pub_server = None
    def signal_handler(self, signal, frame):
        """Handle SIGINT (Ctrl+C): clean up bot resources, then exit."""
        self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')
        self.clean_up()
        self.logger.info('Cleaned up bot, exiting...')
        sys.exit(0)
    def assign_subsystems(self):
        """Instantiates and stores references to bot subsystems.
        :returns: Dict of subsystems, maps system name to instantiated object.
        """
        self.driver = MecDriver()
        systems = {}
        # 'ctrl' exposes this server's own @lib.api_call methods too.
        systems['ctrl'] = self
        systems['driver'] = self.driver
        self.logger.debug('Systems: {}'.format(systems))
        return systems
    def listen(self):
        """Perpetually listen for messages, pass them to generic handler."""
        self.logger.info('Control server: {}'.format(self.server_bind_addr))
        while True:
            try:
                msg = self.ctrl_sock.recv_json()
                reply = self.handle_msg(msg)
                self.logger.debug('Sending: {}'.format(reply))
                self.ctrl_sock.send_json(reply)
            except JSONDecodeError:
                # Reply with an error message instead of crashing, so the
                # REP socket stays in a valid send state.
                err_msg = 'Not a JSON message!'
                self.logger.warning(err_msg)
                self.ctrl_sock.send_json(msgs.error(err_msg))
            except KeyboardInterrupt:
                self.logger.info('Exiting control server. Bye!')
                self.clean_up()
                sys.exit(0)
    def handle_msg(self, msg):
        """Generic message handler. Hands-off based on type of message.
        :param msg: Message, received via ZMQ from client, to handle.
        :type msg: dict
        :returns: An appropriate message reply dict, from lib.messages.
        """
        self.logger.debug('Received: {}'.format(msg))
        try:
            msg_type = msg['type']
        except KeyError as e:
            return msgs.error(e)
        if msg_type == 'ping_req':
            reply = msgs.ping_reply()
        elif msg_type == 'list_req':
            reply = self.list_callables()
        elif msg_type == 'call_req':
            try:
                obj_name = msg['obj_name']
                method = msg['method']
                params = msg['params']
                reply = self.call_method(obj_name, method, params)
            except KeyError as e:
                return msgs.error(e)
        elif msg_type == 'exit_req':
            # Send the reply before tearing down, since clean_up()
            # closes the socket and sys.exit never returns to listen().
            self.logger.info('Received message to die. Bye!')
            reply = msgs.exit_reply()
            self.logger.debug('Sending: {}'.format(reply))
            self.ctrl_sock.send_json(reply)
            self.clean_up()
            sys.exit(0)
        else:
            err_msg = 'Unrecognized message: {}'.format(msg)
            self.logger.warning(err_msg)
            reply = msgs.error(err_msg)
        return reply
    def list_callables(self):
        """Build list of callable methods on each exported subsystem object.
        Uses introspection to create a list of callable methods for each
        registered subsystem object. Only methods which are flagged using the
        @lib.api_call decorator will be included.
        :returns: list_reply message with callable objects and their methods.
        """
        self.logger.debug('List of callable API objects requested')
        callables = {}
        for name, obj in self.systems.items():
            methods = []
            # getmembers returns (name, value) pairs; keep names whose
            # attribute passes the is_api_method export check.
            for member in getmembers(obj):
                if is_api_method(obj, member[0]):
                    methods.append(member[0])
            callables[name] = methods
        return msgs.list_reply(callables)
    def call_method(self, name, method, params):
        """Call a previously registered subsystem method by name. Only
        methods tagged with the @api_call decorator can be called.
        :param name: Assigned name of the registered subsystem.
        :type name: string
        :param method: Subsystem method to be called.
        :type method: string
        :param params: Additional parameters for the called method.
        :type params: dict
        :returns: call_reply or error message dict to be sent to caller.
        """
        self.logger.debug('API call: {}.{}({})'.format(name, method, params))
        if name in self.systems:
            obj = self.systems[name]
            if is_api_method(obj, method):
                try:
                    call_return = getattr(obj, method)(**params)
                    msg = 'Called {}.{}'.format(name, method)
                    self.logger.debug(msg + ',returned:{}'.format(call_return))
                    return msgs.call_reply(msg, call_return)
                except TypeError:
                    # **params didn't match the method signature.
                    # NOTE(review): a TypeError raised *inside* the method
                    # body is reported the same way -- confirm acceptable.
                    err_msg = 'Invalid params for {}.{}'.format(name, method)
                    self.logger.warning(err_msg)
                    return msgs.error(err_msg)
                except Exception as e:
                    # Any other exception is returned to the caller rather
                    # than killing the server loop.
                    err_msg = "Exception: '{}'".format(str(e))
                    self.logger.warning(err_msg)
                    return msgs.error(err_msg)
            else:
                err_msg = "Invalid method: '{}.{}'".format(name, method)
                self.logger.warning(err_msg)
                return msgs.error(err_msg)
        else:
            err_msg = "Invalid object: '{}'".format(name)
            self.logger.warning(err_msg)
            return msgs.error(err_msg)
    @lib.api_call
    def echo(self, msg=None):
        """Echo a message back to the caller.
        :param msg: Message to be echoed back to caller, default is None.
        :returns: Message given by param, defaults to None.
        """
        return msg
    @lib.api_call
    def exception(self):
        """Raise a test exception which will be returned to the caller."""
        raise Exception('Exception test')
    @lib.api_call
    def spawn_pub_server(self):
        """Spawn publisher thread."""
        if self.pub_server is None:
            self.pub_server = pub_server_mod.PubServer(self.systems)
            # Daemon thread: dies automatically with the main process.
            self.pub_server.setDaemon(True)
            self.pub_server.start()
            msg = 'Spawned pub server'
            self.logger.info(msg)
            return msg
        else:
            err_msg = 'PubServer is already running'
            self.logger.warning(err_msg)
            return err_msg
    @lib.api_call
    def stop_full(self):
        """Stop all drive and gun motors, set turret to safe state."""
        self.systems['driver'].move(0, 0)
    def clean_up(self):
        """Tear down ZMQ socket."""
        # Halt the bot before dropping the connection.
        self.stop_full()
        self.ctrl_sock.close()
        self.context.term()
if __name__ == '__main__':
    # One optional CLI argument: the testing flag forwarded to CtrlServer.
    args = sys.argv[1:]
    server = CtrlServer(args[0]) if len(args) == 1 else CtrlServer()
    server.listen()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
    """Tests whether named method exists in obj and is flagged for API export.
    :param obj: API-exported object to search for the given method on.
    :type obj: object
    :param name: Name of method to check for.
    :type name: string
    :returns: True if given method is on given obj and is exported, else False.
    """
    try:
        method = getattr(obj, name)
    except AttributeError:
        # Attribute doesn't exist at all -> not an API method.
        return False
    # Exported methods carry the '__api_call' attribute, set by the
    # @lib.api_call decorator.
    return ismethod(method) and hasattr(method, '__api_call')
class CtrlServer(object):
"""Exports bot control via ZMQ.
Most functionally exported by CtrlServer is in the form of methods
exposed by the API. @lib.api_call decorators can be added to bot
systems, which tags them for export. They can then be called
remotely via CtrlClient, which is typically owned by an interface
like the CLI, which typically accepts commands from an agent like
a human.
Some control is exported directly by CtrlServer, not through the
API. For example, CtrlServer responds directly to ping messages,
list messages (which give the objects/methods exposed by the API),
and exit messages.
CtrlServer is the primary owner of bot resources, which we call
systems. For example, it's CtrlServer that instantiates gunner
and follower. Through those two, CtrlServer owns the gun, the
IR hub, the turret and basically every other bot system.
The messages that CtrlServer accepts and responds with are fully
specified in lib.messages. Make any changes to messages there.
CtrlServer can be instructed (via the API) to spawn a new thread
for a PubServer. When that happens, CtrlServer passes its systems
to PubServer, which can read their state and publish it over a
ZMQ PUB socket.
"""
def __init__(self, testing=None, config_file='bot/config.yaml'):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
signal.signal(signal.SIGINT, self.signal_handler)
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
if testing is True or testing == 'True':
self.logger.info('CtrlServer running in test mode')
lib.set_testing(True)
elif testing is None:
self.logger.info('Defaulting to config testing flag: {}'.format
(self.config['testing']))
lib.set_testing(self.config['testing'])
else:
self.logger.info('CtrlServer running in non-test mode')
lib.set_testing(False)
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol
=self.config['server_protocol'], host=self.config[
'server_bind_host'], port=self.config['ctrl_server_port'])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error('ZMQ error. Is a server already running?')
self.logger.warning('May be connected to an old server instance.')
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info('Control server initialized')
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')
self.clean_up()
self.logger.info('Cleaned up bot, exiting...')
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems['ctrl'] = self
systems['driver'] = self.driver
self.logger.debug('Systems: {}'.format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info('Control server: {}'.format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug('Sending: {}'.format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = 'Not a JSON message!'
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info('Exiting control server. Bye!')
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug('Received: {}'.format(msg))
try:
msg_type = msg['type']
except KeyError as e:
return msgs.error(e)
if msg_type == 'ping_req':
reply = msgs.ping_reply()
elif msg_type == 'list_req':
reply = self.list_callables()
elif msg_type == 'call_req':
try:
obj_name = msg['obj_name']
method = msg['method']
params = msg['params']
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == 'exit_req':
self.logger.info('Received message to die. Bye!')
reply = msgs.exit_reply()
self.logger.debug('Sending: {}'.format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = 'Unrecognized message: {}'.format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug('List of callable API objects requested')
callables = {}
for name, obj in self.systems.items():
methods = []
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug('API call: {}.{}({})'.format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
call_return = getattr(obj, method)(**params)
msg = 'Called {}.{}'.format(name, method)
self.logger.debug(msg + ',returned:{}'.format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
err_msg = 'Invalid params for {}.{}'.format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception('Exception test')
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = 'Spawned pub server'
self.logger.info(msg)
return msg
else:
err_msg = 'PubServer is already running'
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems['driver'].move(0, 0)
    def clean_up(self):
        """Stop the motors, then tear down the ZMQ socket and context.

        Order matters here: halt the bot first, close the REP socket,
        and only then terminate the context that owns the socket.
        """
        self.stop_full()
        self.ctrl_sock.close()
        self.context.term()
if __name__ == '__main__':
    # A single CLI argument is forwarded as the CtrlServer testing flag.
    argv = sys.argv
    server = CtrlServer(argv[1]) if len(argv) == 2 else CtrlServer()
    server.listen()
<|reserved_special_token_1|>
#!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
# This is required to make imports work
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
    """Tests whether named method exists in obj and is flagged for API export.

    :param obj: API-exported object to search for the given method on.
    :type obj: object
    :param name: Name of method to check for.
    :type name: string
    :returns: True if given method is on given obj and is exported, else False.

    """
    # getattr with a default replaces the try/except: a missing attribute
    # yields None, which ismethod() rejects, so the result is still False.
    candidate = getattr(obj, name, None)
    return ismethod(candidate) and hasattr(candidate, "__api_call")
class CtrlServer(object):
"""Exports bot control via ZMQ.
Most functionally exported by CtrlServer is in the form of methods
exposed by the API. @lib.api_call decorators can be added to bot
systems, which tags them for export. They can then be called
remotely via CtrlClient, which is typically owned by an interface
like the CLI, which typically accepts commands from an agent like
a human.
Some control is exported directly by CtrlServer, not through the
API. For example, CtrlServer responds directly to ping messages,
list messages (which give the objects/methods exposed by the API),
and exit messages.
CtrlServer is the primary owner of bot resources, which we call
systems. For example, it's CtrlServer that instantiates gunner
and follower. Through those two, CtrlServer owns the gun, the
IR hub, the turret and basically every other bot system.
The messages that CtrlServer accepts and responds with are fully
specified in lib.messages. Make any changes to messages there.
CtrlServer can be instructed (via the API) to spawn a new thread
for a PubServer. When that happens, CtrlServer passes its systems
to PubServer, which can read their state and publish it over a
ZMQ PUB socket.
"""
def __init__(self, testing=None, config_file="bot/config.yaml"):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
# Register signal handler, shut down cleanly (think motors)
signal.signal(signal.SIGINT, self.signal_handler)
# Load configuration and logger
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
# Testing flag will cause objects to run on simulated hardware
if testing is True or testing == "True":
self.logger.info("CtrlServer running in test mode")
lib.set_testing(True)
elif testing is None:
self.logger.info(
"Defaulting to config testing flag: {}".format(
self.config["testing"]))
lib.set_testing(self.config["testing"])
else:
self.logger.info("CtrlServer running in non-test mode")
lib.set_testing(False)
# Build socket to listen for requests
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = "{protocol}://{host}:{port}".format(
protocol=self.config["server_protocol"],
host=self.config["server_bind_host"],
port=self.config["ctrl_server_port"])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error("ZMQ error. Is a server already running?")
self.logger.warning("May be connected to an old server instance.")
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info("Control server initialized")
# Don't spawn pub_server until told to
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
self.clean_up()
self.logger.info("Cleaned up bot, exiting...")
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems["ctrl"] = self
systems["driver"] = self.driver
self.logger.debug("Systems: {}".format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info("Control server: {}".format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = "Not a JSON message!"
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info("Exiting control server. Bye!")
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug("Received: {}".format(msg))
try:
msg_type = msg["type"]
except KeyError as e:
return msgs.error(e)
if msg_type == "ping_req":
reply = msgs.ping_reply()
elif msg_type == "list_req":
reply = self.list_callables()
elif msg_type == "call_req":
try:
obj_name = msg["obj_name"]
method = msg["method"]
params = msg["params"]
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == "exit_req":
self.logger.info("Received message to die. Bye!")
reply = msgs.exit_reply()
# Need to actually send reply here as we're about to exit
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = "Unrecognized message: {}".format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug("List of callable API objects requested")
# Dict of subsystem object names to their callable methods.
callables = {}
for name, obj in self.systems.items():
methods = []
# Filter out methods which are not explicitly flagged for export
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug("API call: {}.{}({})".format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
# Calls given obj.method, unpacking and passing params dict
call_return = getattr(obj, method)(**params)
msg = "Called {}.{}".format(name, method)
self.logger.debug(msg + ",returned:{}".format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
# Raised when we have a mismatch of the method's kwargs
# TODO: Return argspec here?
err_msg = "Invalid params for {}.{}".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
# Catch exception raised by called method, notify client
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception("Exception test")
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
# Prevent pub_server thread from blocking the process from closing
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = "Spawned pub server"
self.logger.info(msg)
return msg
else:
err_msg = "PubServer is already running"
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems["driver"].move(0, 0)
def clean_up(self):
    """Shut down cleanly: halt motors first, then tear down ZMQ state."""
    # Stop the bot before dropping comms so it never keeps driving blind.
    self.stop_full()
    sock, ctx = self.ctrl_sock, self.context
    sock.close()
    ctx.term()
if __name__ == "__main__":
    # Optional single CLI argument is forwarded as the `testing` flag.
    server = CtrlServer(sys.argv[1]) if len(sys.argv) == 2 else CtrlServer()
    server.listen()
|
flexible
|
{
"blob_id": "ddb81e3ce0df44ee503c558b68b41c35935358a0",
"index": 8663,
"step-1": "<mask token>\n\n\nclass CtrlServer(object):\n <mask token>\n <mask token>\n <mask token>\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. 
Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n <mask token>\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n 
self.ctrl_sock.close()\n self.context.term()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CtrlServer(object):\n <mask token>\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. 
Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. 
Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already 
running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return ismethod(method) and hasattr(method, '__api_call')\n\n\nclass CtrlServer(object):\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. 
When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. 
Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. 
Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already 
running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n",
"step-4": "<mask token>\nimport sys\nimport os\nfrom inspect import getmembers, ismethod\nfrom simplejson.decoder import JSONDecodeError\nimport zmq\nimport signal\nsys.path = [os.getcwd()] + sys.path\nimport bot.lib.lib as lib\nimport pub_server as pub_server_mod\nimport bot.lib.messages as msgs\nfrom bot.driver.mec_driver import MecDriver\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return ismethod(method) and hasattr(method, '__api_call')\n\n\nclass CtrlServer(object):\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. 
When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. 
Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. 
Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already 
running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n",
"step-5": "#!/usr/bin/env python\n\"\"\"Server that accepts and executes control-type commands on the bot.\"\"\"\n\nimport sys\nimport os\nfrom inspect import getmembers, ismethod\nfrom simplejson.decoder import JSONDecodeError\nimport zmq\nimport signal\n\n# This is required to make imports work\nsys.path = [os.getcwd()] + sys.path\n\nimport bot.lib.lib as lib\nimport pub_server as pub_server_mod\nimport bot.lib.messages as msgs\n\nfrom bot.driver.mec_driver import MecDriver\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return (ismethod(method) and hasattr(method, \"__api_call\"))\n\n\nclass CtrlServer(object):\n\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. 
Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file=\"bot/config.yaml\"):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n # Register signal handler, shut down cleanly (think motors)\n signal.signal(signal.SIGINT, self.signal_handler)\n\n # Load configuration and logger\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n\n # Testing flag will cause objects to run on simulated hardware\n if testing is True or testing == \"True\":\n self.logger.info(\"CtrlServer running in test mode\")\n lib.set_testing(True)\n elif testing is None:\n self.logger.info(\n \"Defaulting to config testing flag: {}\".format(\n self.config[\"testing\"]))\n lib.set_testing(self.config[\"testing\"])\n else:\n self.logger.info(\"CtrlServer running in non-test mode\")\n lib.set_testing(False)\n\n # Build socket to listen for requests\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = \"{protocol}://{host}:{port}\".format(\n protocol=self.config[\"server_protocol\"],\n host=self.config[\"server_bind_host\"],\n port=self.config[\"ctrl_server_port\"])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error(\"ZMQ error. 
Is a server already running?\")\n self.logger.warning(\"May be connected to an old server instance.\")\n sys.exit(1)\n\n self.systems = self.assign_subsystems()\n self.logger.info(\"Control server initialized\")\n\n # Don't spawn pub_server until told to\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info(\"Caught SIGINT (Ctrl+C), closing cleanly\")\n self.clean_up()\n self.logger.info(\"Cleaned up bot, exiting...\")\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n\n self.driver = MecDriver()\n\n systems = {}\n systems[\"ctrl\"] = self\n systems[\"driver\"] = self.driver\n\n self.logger.debug(\"Systems: {}\".format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info(\"Control server: {}\".format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = \"Not a JSON message!\"\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info(\"Exiting control server. Bye!\")\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. 
Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. 
Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug(\"List of callable API objects requested\")\n # Dict of subsystem object names to their callable methods.\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n # Filter out methods which are not explicitly flagged for export\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug(\"API call: {}.{}({})\".format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n # Calls given obj.method, unpacking and passing params dict\n call_return = getattr(obj, method)(**params)\n msg = \"Called {}.{}\".format(name, method)\n self.logger.debug(msg + \",returned:{}\".format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n # Raised when we have a mismatch of the method's kwargs\n # TODO: Return argspec here?\n err_msg = \"Invalid params for {}.{}\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n # Catch exception raised by called method, notify client\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: 
'{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception(\"Exception test\")\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n # Prevent pub_server thread from blocking the process from closing\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = \"Spawned pub server\"\n self.logger.info(msg)\n return msg\n else:\n err_msg = \"PubServer is already running\"\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems[\"driver\"].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n",
"step-ids": [
10,
13,
16,
18,
19
]
}
|
[
10,
13,
16,
18,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while lic_plate != 'A999AA':
if int(speed) > 60:
if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:
salary += 1000
elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:
salary += 500
elif lic_plate[2] == lic_plate[3]:
salary += 500
else:
salary += 100
speed, lic_plate = input().split()
print(salary)
<|reserved_special_token_1|>
speed, lic_plate = input().split()
salary = int(0)
while lic_plate != 'A999AA':
if int(speed) > 60:
if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:
salary += 1000
elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:
salary += 500
elif lic_plate[2] == lic_plate[3]:
salary += 500
else:
salary += 100
speed, lic_plate = input().split()
print(salary)
<|reserved_special_token_1|>
speed, lic_plate = input().split()
salary = int(0)
while lic_plate != "A999AA":
if int(speed) > 60:
if lic_plate[1] == lic_plate[2] and lic_plate [2] == lic_plate[3]:
salary += 1000
elif lic_plate[1] == lic_plate[2] or lic_plate [1] == lic_plate[3]:
salary += 500
elif lic_plate[2] == lic_plate[3]:
salary += 500
else:
salary += 100
speed, lic_plate = input().split()
print(salary)
|
flexible
|
{
"blob_id": "ff8ffeb418bf4f9bc7d5dadd126ebc7c34c5c2cd",
"index": 4454,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile lic_plate != 'A999AA':\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n",
"step-3": "speed, lic_plate = input().split()\nsalary = int(0)\nwhile lic_plate != 'A999AA':\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n",
"step-4": "speed, lic_plate = input().split()\nsalary = int(0)\nwhile lic_plate != \"A999AA\":\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate [2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate [1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
msg("""
Проверка на наличие подпапок исходной папки в выходной:""")
for folder in input_folders:
if folder in output_folders:
warning(' ' + folder)
else:
error(' ' + folder)
msg("""
Проверка на наличие подпапок выходной папки в исходной:""")
<|reserved_special_token_0|>
for folder in output_folders:
if folder in input_folders:
warning(' ' + folder)
else:
remove_list.append(folder)
error(' ' + folder)
for folder in remove_list:
output_folders.pop(folder, None)
msg("""
Копирование файлов в папки...""")
<|reserved_special_token_0|>
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder,
subfolders, '*.TAB'))]
if not tab_files:
remove_list.append(subfolders)
if u'Импорт' in subfolders:
continue
else:
similar_output_folder = join(output_folder, subfolders)
msg(' ' + subfolders)
files_to_copy = [copy_file for copy_file in get_files(join(input_folder,
subfolders, '*.*'))]
for file_to_copy in files_to_copy:
_, file_extension = splitext(file_to_copy)
if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',
'.IND', '.MAP']:
msg(' ' + file_to_copy)
copy(file_to_copy, similar_output_folder)
for folder in remove_list:
output_folders.pop(folder, None)
output_folders.pop('', None)
msg("""
Создание баз данных...""")
for output_subfolders in output_folders:
mdb_name = basename(output_subfolders)
mdb_local_path = join(output_subfolders, mdb_name + '.mdb')
if enable_rewrite_databases == 'true':
a.Delete_management(join(output_folder, output_subfolders, mdb_name +
'.mdb'))
try:
a.CreatePersonalGDB_management(join(output_folder,
output_subfolders), mdb_name + '.mdb')
msg(' ' + mdb_local_path)
except a.ExecuteError:
warning(' ' + mdb_local_path)
msg("""
Конвертация TAB в слои...""")
<|reserved_special_token_0|>
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder,
subfolders, '*.TAB'))]
for tab_file_path in tab_files:
for layer_type in layer_types:
tab_name = basename(tab_file_path).replace('.TAB', '')
layer_from_name = tab_name + ' ' + layer_type
layer_from = join(tab_file_path, layer_from_name)
a.Exists(layer_from)
if not a.Exists(layer_from):
continue
layer_to_name = layer_from_name.replace(' ', '_')
if layer_to_name[0].isdigit():
layer_to_name = 'L' + layer_to_name
layer_to = join(output_folder, subfolders, basename(subfolders) +
'.mdb', layer_to_name)
local_tab_path = join(subfolders, tab_name + '.TAB')
if a.Exists(layer_to) and enable_rewrite_tabs == 'true':
a.Delete_management(layer_to)
msg(u' ' + local_tab_path + ' ' + layer_type)
elif a.Exists(layer_to):
warning(u' ' + local_tab_path + ' ' + layer_type)
continue
elif not a.Exists(layer_to):
msg(u' ' + local_tab_path + ' ' + layer_type)
try:
a.CopyFeatures_management(layer_from, layer_to)
except:
try:
a.CopyRows_management(layer_from, layer_to)
except Exception as e:
error(
' Ошибка. Копирование объектов/строк не сработало:'
+ str(e))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input_folder = a.GetParameterAsText(0)
output_folder = a.GetParameterAsText(1)
enable_rewrite_databases = a.GetParameterAsText(2)
enable_rewrite_tabs = a.GetParameterAsText(3)
input_folders_order = [root.replace(input_folder + '\\', '') for root, dirs,
_ in walk(input_folder)]
output_folders_order = [root.replace(output_folder + '\\', '') for root,
dirs, _ in walk(output_folder)]
input_folders_unordered_dict = {root.replace(input_folder + '\\', ''): dirs for
root, dirs, _ in walk(input_folder)}
output_folders_unordered_dict = {root.replace(output_folder + '\\', ''):
dirs for root, dirs, _ in walk(output_folder)}
input_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in
input_folders_order)
output_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in
output_folders_order)
msg("""
Проверка на наличие подпапок исходной папки в выходной:""")
for folder in input_folders:
if folder in output_folders:
warning(' ' + folder)
else:
error(' ' + folder)
msg("""
Проверка на наличие подпапок выходной папки в исходной:""")
remove_list = []
for folder in output_folders:
if folder in input_folders:
warning(' ' + folder)
else:
remove_list.append(folder)
error(' ' + folder)
for folder in remove_list:
output_folders.pop(folder, None)
msg("""
Копирование файлов в папки...""")
remove_list = []
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder,
subfolders, '*.TAB'))]
if not tab_files:
remove_list.append(subfolders)
if u'Импорт' in subfolders:
continue
else:
similar_output_folder = join(output_folder, subfolders)
msg(' ' + subfolders)
files_to_copy = [copy_file for copy_file in get_files(join(input_folder,
subfolders, '*.*'))]
for file_to_copy in files_to_copy:
_, file_extension = splitext(file_to_copy)
if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',
'.IND', '.MAP']:
msg(' ' + file_to_copy)
copy(file_to_copy, similar_output_folder)
for folder in remove_list:
output_folders.pop(folder, None)
output_folders.pop('', None)
msg("""
Создание баз данных...""")
for output_subfolders in output_folders:
mdb_name = basename(output_subfolders)
mdb_local_path = join(output_subfolders, mdb_name + '.mdb')
if enable_rewrite_databases == 'true':
a.Delete_management(join(output_folder, output_subfolders, mdb_name +
'.mdb'))
try:
a.CreatePersonalGDB_management(join(output_folder,
output_subfolders), mdb_name + '.mdb')
msg(' ' + mdb_local_path)
except a.ExecuteError:
warning(' ' + mdb_local_path)
msg("""
Конвертация TAB в слои...""")
layer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder,
subfolders, '*.TAB'))]
for tab_file_path in tab_files:
for layer_type in layer_types:
tab_name = basename(tab_file_path).replace('.TAB', '')
layer_from_name = tab_name + ' ' + layer_type
layer_from = join(tab_file_path, layer_from_name)
a.Exists(layer_from)
if not a.Exists(layer_from):
continue
layer_to_name = layer_from_name.replace(' ', '_')
if layer_to_name[0].isdigit():
layer_to_name = 'L' + layer_to_name
layer_to = join(output_folder, subfolders, basename(subfolders) +
'.mdb', layer_to_name)
local_tab_path = join(subfolders, tab_name + '.TAB')
if a.Exists(layer_to) and enable_rewrite_tabs == 'true':
a.Delete_management(layer_to)
msg(u' ' + local_tab_path + ' ' + layer_type)
elif a.Exists(layer_to):
warning(u' ' + local_tab_path + ' ' + layer_type)
continue
elif not a.Exists(layer_to):
msg(u' ' + local_tab_path + ' ' + layer_type)
try:
a.CopyFeatures_management(layer_from, layer_to)
except:
try:
a.CopyRows_management(layer_from, layer_to)
except Exception as e:
error(
' Ошибка. Копирование объектов/строк не сработало:'
+ str(e))
<|reserved_special_token_1|>
import arcpy as a
from arcpy import AddMessage as msg, AddWarning as warning, AddError as error
from os import mkdir, walk
from os.path import join, dirname, basename, splitext
from glob import glob as get_files
from shutil import copy
from collections import OrderedDict
input_folder = a.GetParameterAsText(0)
output_folder = a.GetParameterAsText(1)
enable_rewrite_databases = a.GetParameterAsText(2)
enable_rewrite_tabs = a.GetParameterAsText(3)
input_folders_order = [root.replace(input_folder + '\\', '') for root, dirs,
_ in walk(input_folder)]
output_folders_order = [root.replace(output_folder + '\\', '') for root,
dirs, _ in walk(output_folder)]
input_folders_unordered_dict = {root.replace(input_folder + '\\', ''): dirs for
root, dirs, _ in walk(input_folder)}
output_folders_unordered_dict = {root.replace(output_folder + '\\', ''):
dirs for root, dirs, _ in walk(output_folder)}
input_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in
input_folders_order)
output_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in
output_folders_order)
msg("""
Проверка на наличие подпапок исходной папки в выходной:""")
for folder in input_folders:
if folder in output_folders:
warning(' ' + folder)
else:
error(' ' + folder)
msg("""
Проверка на наличие подпапок выходной папки в исходной:""")
remove_list = []
for folder in output_folders:
if folder in input_folders:
warning(' ' + folder)
else:
remove_list.append(folder)
error(' ' + folder)
for folder in remove_list:
output_folders.pop(folder, None)
msg("""
Копирование файлов в папки...""")
remove_list = []
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder,
subfolders, '*.TAB'))]
if not tab_files:
remove_list.append(subfolders)
if u'Импорт' in subfolders:
continue
else:
similar_output_folder = join(output_folder, subfolders)
msg(' ' + subfolders)
files_to_copy = [copy_file for copy_file in get_files(join(input_folder,
subfolders, '*.*'))]
for file_to_copy in files_to_copy:
_, file_extension = splitext(file_to_copy)
if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',
'.IND', '.MAP']:
msg(' ' + file_to_copy)
copy(file_to_copy, similar_output_folder)
for folder in remove_list:
output_folders.pop(folder, None)
output_folders.pop('', None)
msg("""
Создание баз данных...""")
for output_subfolders in output_folders:
mdb_name = basename(output_subfolders)
mdb_local_path = join(output_subfolders, mdb_name + '.mdb')
if enable_rewrite_databases == 'true':
a.Delete_management(join(output_folder, output_subfolders, mdb_name +
'.mdb'))
try:
a.CreatePersonalGDB_management(join(output_folder,
output_subfolders), mdb_name + '.mdb')
msg(' ' + mdb_local_path)
except a.ExecuteError:
warning(' ' + mdb_local_path)
msg("""
Конвертация TAB в слои...""")
layer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder,
subfolders, '*.TAB'))]
for tab_file_path in tab_files:
for layer_type in layer_types:
tab_name = basename(tab_file_path).replace('.TAB', '')
layer_from_name = tab_name + ' ' + layer_type
layer_from = join(tab_file_path, layer_from_name)
a.Exists(layer_from)
if not a.Exists(layer_from):
continue
layer_to_name = layer_from_name.replace(' ', '_')
if layer_to_name[0].isdigit():
layer_to_name = 'L' + layer_to_name
layer_to = join(output_folder, subfolders, basename(subfolders) +
'.mdb', layer_to_name)
local_tab_path = join(subfolders, tab_name + '.TAB')
if a.Exists(layer_to) and enable_rewrite_tabs == 'true':
a.Delete_management(layer_to)
msg(u' ' + local_tab_path + ' ' + layer_type)
elif a.Exists(layer_to):
warning(u' ' + local_tab_path + ' ' + layer_type)
continue
elif not a.Exists(layer_to):
msg(u' ' + local_tab_path + ' ' + layer_type)
try:
a.CopyFeatures_management(layer_from, layer_to)
except:
try:
a.CopyRows_management(layer_from, layer_to)
except Exception as e:
error(
' Ошибка. Копирование объектов/строк не сработало:'
+ str(e))
<|reserved_special_token_1|>
# -*- coding: cp1251 -*-
import arcpy as a
from arcpy import AddMessage as msg, AddWarning as warning, AddError as error
from os import mkdir, walk
from os.path import join, dirname, basename, splitext
from glob import glob as get_files
from shutil import copy
from collections import OrderedDict
input_folder = a.GetParameterAsText(0)
output_folder = a.GetParameterAsText(1)
enable_rewrite_databases = a.GetParameterAsText(2)
enable_rewrite_tabs = a.GetParameterAsText(3)
input_folders_order = [root.replace(input_folder + '\\', '') for root, dirs, _ in walk(input_folder)]
output_folders_order = [root.replace(output_folder + '\\', '') for root, dirs, _ in walk(output_folder)]
input_folders_unordered_dict = {root.replace(input_folder + '\\', ''):dirs for root, dirs, _ in walk(input_folder)}
output_folders_unordered_dict = {root.replace(output_folder + '\\', ''):dirs for root, dirs, _ in walk(output_folder)}
input_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in input_folders_order)
output_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in output_folders_order)
msg("\nПроверка на наличие подпапок исходной папки в выходной:")
for folder in input_folders:
if folder in output_folders:
warning(' ' + folder)
else:
error(' ' + folder)
msg("\nПроверка на наличие подпапок выходной папки в исходной:")
remove_list = []
for folder in output_folders:
if folder in input_folders:
warning(' ' + folder)
else:
remove_list.append(folder)
error(' ' + folder)
for folder in remove_list:
output_folders.pop(folder, None)
msg("\nКопирование файлов в папки...")
remove_list = []
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
if not tab_files:
remove_list.append(subfolders)
if u"Импорт" in subfolders:
continue
else:
similar_output_folder = join(output_folder, subfolders)
msg(' ' + subfolders)
files_to_copy = [copy_file for copy_file in get_files(join(input_folder, subfolders, "*.*"))]
for file_to_copy in files_to_copy:
_, file_extension = splitext(file_to_copy)
if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID', '.IND', '.MAP']:
msg(' ' + file_to_copy)
copy(file_to_copy, similar_output_folder)
for folder in remove_list:
output_folders.pop(folder, None)
output_folders.pop('', None)
msg("\nСоздание баз данных...")
for output_subfolders in output_folders:
mdb_name = basename(output_subfolders)
mdb_local_path = join(output_subfolders, mdb_name + ".mdb")
if enable_rewrite_databases == 'true':
a.Delete_management(join(output_folder, output_subfolders, mdb_name + ".mdb"))
try:
a.CreatePersonalGDB_management(join(output_folder, output_subfolders), mdb_name + ".mdb")
msg(" " + mdb_local_path)
except a.ExecuteError:
warning(" " + mdb_local_path)
msg("\nКонвертация TAB в слои...")
layer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
for tab_file_path in tab_files:
for layer_type in layer_types:
tab_name = basename(tab_file_path).replace('.TAB', '')
layer_from_name = tab_name + ' ' + layer_type
layer_from = join(tab_file_path, layer_from_name)
a.Exists(layer_from)
if not a.Exists(layer_from):
continue
layer_to_name = layer_from_name.replace(' ', '_')
if layer_to_name[0].isdigit():
layer_to_name = 'L' + layer_to_name
layer_to = join(output_folder, subfolders, basename(subfolders) + '.mdb', layer_to_name)
local_tab_path = join(subfolders, tab_name + '.TAB')
if a.Exists(layer_to) and enable_rewrite_tabs == 'true':
a.Delete_management(layer_to)
msg(u' ' + local_tab_path + ' ' + layer_type)
elif a.Exists(layer_to):
warning(u' ' + local_tab_path + ' ' + layer_type)
continue
elif not a.Exists(layer_to):
msg(u' ' + local_tab_path + ' ' + layer_type)
try:
a.CopyFeatures_management(layer_from, layer_to)
except:
try:
a.CopyRows_management(layer_from, layer_to)
except Exception as e:
error(' Ошибка. Копирование объектов/строк не сработало:' + str(e))
|
flexible
|
{
"blob_id": "409e0fc0b1c1d86c5526d33ba271a8387eecf748",
"index": 9872,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\n<mask token>\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\n<mask token>\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\n<mask token>\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, 
'*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-3": "<mask token>\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs,\n _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root,\n dirs, _ in walk(output_folder)]\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''): dirs for\n root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):\n dirs for root, dirs, _ in walk(output_folder)}\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in\n input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in\n output_folders_order)\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', 
'.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. 
Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-4": "import arcpy as a\nfrom arcpy import AddMessage as msg, AddWarning as warning, AddError as error\nfrom os import mkdir, walk\nfrom os.path import join, dirname, basename, splitext\nfrom glob import glob as get_files\nfrom shutil import copy\nfrom collections import OrderedDict\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs,\n _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root,\n dirs, _ in walk(output_folder)]\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''): dirs for\n root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):\n dirs for root, dirs, _ in walk(output_folder)}\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in\n input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in\n output_folders_order)\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + 
subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not 
a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-5": "# -*- coding: cp1251 -*-\nimport arcpy as a\nfrom arcpy import AddMessage as msg, AddWarning as warning, AddError as error\n\nfrom os import mkdir, walk\nfrom os.path import join, dirname, basename, splitext\nfrom glob import glob as get_files\nfrom shutil import copy\nfrom collections import OrderedDict\n\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\n\n\n\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs, _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root, dirs, _ in walk(output_folder)]\n\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''):dirs for root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):dirs for root, dirs, _ in walk(output_folder)}\n\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in output_folders_order)\n\nmsg(\"\\nПроверка на наличие подпапок исходной папки в выходной:\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\n\nmsg(\"\\nПроверка на наличие подпапок выходной папки в исходной:\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\n\nfor folder in remove_list:\n output_folders.pop(folder, None)\n\n\n\nmsg(\"\\nКопирование файлов в папки...\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, \"*.TAB\"))]\n if not tab_files:\n remove_list.append(subfolders)\n\n if u\"Импорт\" in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, 
subfolders)\n\n msg(' ' + subfolders)\n\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder, subfolders, \"*.*\"))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID', '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\n\nfor folder in remove_list:\n output_folders.pop(folder, None)\n\noutput_folders.pop('', None)\n\n\n\nmsg(\"\\nСоздание баз данных...\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + \".mdb\")\n\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name + \".mdb\"))\n\n try:\n a.CreatePersonalGDB_management(join(output_folder, output_subfolders), mdb_name + \".mdb\")\n msg(\" \" + mdb_local_path)\n except a.ExecuteError:\n warning(\" \" + mdb_local_path)\n\n\n\nmsg(\"\\nКонвертация TAB в слои...\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\n\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, \"*.TAB\"))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n\n a.Exists(layer_from)\n\n if not a.Exists(layer_from):\n continue\n\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) + '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + 
layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(' Ошибка. Копирование объектов/строк не сработало:' + str(e))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
topping = None
while topping != 'quit':
if topping:
print("I'll add %s to your pizza!" % topping)
topping = input(
"What topping would you like? (enter 'quit' when you are done.) ")
<|reserved_special_token_1|>
"""
7-4. Pizza Toppings: Write a loop that prompts the user to enter a series of
pizza toppings until they enter a 'quit' value. As they enter each topping,
print a message saying you’ll add that topping to their pizza.
"""
if __name__ == '__main__':
topping = None
while topping != "quit":
if topping:
print("I'll add %s to your pizza!" % topping)
topping = input("What topping would you like? (enter 'quit' when you are done.) ")
|
flexible
|
{
"blob_id": "4d07795543989fe481e1141756f988d276f82c02",
"index": 5348,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n topping = None\n while topping != 'quit':\n if topping:\n print(\"I'll add %s to your pizza!\" % topping)\n topping = input(\n \"What topping would you like? (enter 'quit' when you are done.) \")\n",
"step-3": "\"\"\"\n7-4. Pizza Toppings: Write a loop that prompts the user to enter a series of\npizza toppings until they enter a 'quit' value. As they enter each topping,\nprint a message saying you’ll add that topping to their pizza.\n\"\"\"\nif __name__ == '__main__':\n topping = None\n while topping != \"quit\":\n if topping:\n print(\"I'll add %s to your pizza!\" % topping)\n topping = input(\"What topping would you like? (enter 'quit' when you are done.) \")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
Proyecto SA^3
Autor: Mario Lopez
Luis Aviles
Joaquin V
Fecha: Octubre del 2012
versión: 1
"""
#Manejo de temlates en el HTML
import jinja2
from jinja2 import Environment, PackageLoader
import os
import cgi
import datetime
import urllib
# for hashing
import hashlib
#Layer de comunicacion con Modelo
from modelo.Layer import *
from modelo.Layer2 import *
#Framework de Web para Python
import webapp2
# API DataStore
from google.appengine.ext import db
# intitalization of template system. It says that HTML templates will
# be found in current directory ("__file__")
# variable env para sesiones
env = Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
env.filters['format_time'] = format_time
# Método para verificar si hay una sesión activa
def before_filter(fn):
    """Decorator: issue a redirect to the login page when no session exists.

    A session is represented by the 'session' key in the shared jinja2
    environment globals. Note that, as in the original, the wrapped
    handler body still runs after the redirect has been scheduled.
    """
    def _guarded(self):
        if 'session' not in env.globals:
            self.redirect('/')
        return fn(self)
    return _guarded
"""
REQUEST HANDLERS
"""
class MainPage(webapp2.RequestHandler):
    """Initial screen: renders the login form.

    Also prepares seed data for a default 'admin' user, but the actual
    persistence call is commented out below.
    """
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        # Seed values for a default admin account (persisting is disabled below)
        matricula = 'admin'
        password = 'admin'
        nombre = 'admin'
        apellidop = 'admin'
        apellidom = 'admin'
        tipo = 'admin'
        # Hash the password with RIPEMD-160 (same scheme IniciaSesion uses).
        # NOTE(review): the variable is named md5 but the digest is ripemd160;
        # h.update(password) assumes a Python 2 byte-string password.
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5
        #Usuario(matricula = matricula, password = password, nombre = nombre, apellidop = apellidop, apellidom = apellidom, tipo = tipo).put()
        #productos = db.GqlQuery("SELECT * FROM Inventario")
        # Render the login view
        _despliegaLogin(self, '/vistas/login.html')
class VerUsuarios(webapp2.RequestHandler):
    """List every registered user."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        usuarios = db.GqlQuery("SELECT * FROM Usuario")
        _despliegaVerUsuarios(self, usuarios, '/vistas/verUsuarios.html')
class RegistroAlumno(webapp2.RequestHandler):
    """Student-registration form, populated with the clinic choices."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinicas = db.GqlQuery("SELECT * FROM Clinica")
        _despliegaRegistroAlumno(self, clinicas, '/vistas/registroAlumno.html')
class GrabaAlumno(webapp2.RequestHandler):
    """Handle the student-registration POST.

    NOTE(review): this handler hashes the password but never persists
    the student nor responds/redirects -- it looks unfinished; confirm
    against the intended flow before relying on it.
    """
    def post(self):
        nombre = self.request.get('nombre')
        matricula = self.request.get('matricula')
        password = self.request.get('password')
        # Hash the password (RIPEMD-160, same scheme as login)
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5
class RegistraUsuario(webapp2.RequestHandler):
    """User-registration form."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaRegistraUsuario(self, '/vistas/registraUsuario.html')
class GrabaUsuario(webapp2.RequestHandler):
    """Persist a new user from the registration form, then show the list."""
    def post(self):
        nombre = self.request.get('nombre')
        matricula = self.request.get('matricula')
        password = self.request.get('password')
        apellidop = self.request.get('apellidop')
        apellidom = self.request.get('apellidom')
        tipo = self.request.get('tipo')
        # Hash the password (RIPEMD-160, same scheme as login)
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5
        grabaUsuario(matricula,password,nombre,apellidop,apellidom,tipo)
        self.redirect('/verUsuarios')
class IniciaSesion(webapp2.RequestHandler):
    """Handle the login form POST.

    Hashes the submitted password (RIPEMD-160, matching how users are
    stored), looks the user up, stores a session list in the template
    environment globals on success and redirects to the welcome page;
    on failure redirects back to the login form.
    """
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        matricula = self.request.get('matricula')
        password = self.request.get('password')
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5
        # SECURITY FIX: bind user input as GQL parameters instead of
        # concatenating it into the query string (the previous version was
        # vulnerable to query injection through the login fields).
        user = db.GqlQuery(
            "SELECT * FROM Usuario WHERE matricula = :1 AND password = :2",
            matricula, password)
        if user.count() == 1:
            for u in user:
                # Session payload layout: [nombre, matricula, tipo, key]
                user = []
                user.append(u.nombre)
                user.append(u.matricula)
                user.append(u.tipo)
                user.append(u.key())
            env.globals['session'] = user
            self.redirect('/bienvenida')
        else:
            self.redirect('/')
class CerrarSesion(webapp2.RequestHandler):
    """Log out: drop the active session and return to the login screen."""
    def get(self):
        # Removing this key is what before_filter checks, so deleting it
        # effectively ends the session for all guarded handlers.
        del env.globals['session']
        self.redirect('/')
class Bienvenida(webapp2.RequestHandler):
    """Welcome screen shown right after a successful login."""
    @before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaBienvenida(self, '/vistas/bienvenida.html')
class AgregaHorarioClinica(webapp2.RequestHandler):
    """Form to attach a schedule to a clinic."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinicas = getAllClinicas()
        _despliegaAgregaHorarioClinica(self,clinicas, '/vistas/agregarHorarioClinica.html')
#======================================= Clinic handlers
class AgregarClinica(webapp2.RequestHandler):
    """Form to register a new clinic."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarClinica(self, '/vistas/Clinica/agregarClinica.html')
class GrabaClinica(webapp2.RequestHandler):
    """Create or update a Clinica from the submitted form.

    An empty/missing 'key' means create; otherwise update in place.
    """
    def post(self):
        key = self.request.get('key')
        nombre = self.request.get('nombre')
        descripcion = self.request.get('descripcion')
        localizacion = self.request.get('localizacion')
        # NOTE(review): int() raises ValueError on non-numeric input;
        # presumably the form constrains these fields -- confirm.
        unidades = int(self.request.get('unidades'))
        defectuosas = int(self.request.get('defectuosas'))
        if(key == None or key ==""):
            grabaClinica(nombre,descripcion,localizacion,unidades,defectuosas)
        else:
            actualizaClinica(key,nombre,descripcion,localizacion,unidades,defectuosas)
        self.redirect('/verClinicas') # back to the clinic list
class EliminaClinica(webapp2.RequestHandler):
    """Delete the clinic identified by the 'key' query parameter."""
    def get(self):
        key = self.request.get('key')
        eliminaClinica(key)
        self.redirect('/verClinicas') # back to the clinic list
class VerClinicas(webapp2.RequestHandler):
    """List every registered clinic."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        # FIX: `time` is not among this module's top-level imports; the
        # original call only worked if a star import (modelo.Layer*) leaked
        # the name. Import it explicitly so the handler cannot NameError.
        import time
        # Short pause so the (eventually consistent) datastore catches up
        # after a preceding write before the clinic list is re-queried.
        time.sleep(.1)
        clinicas = getAllClinicas()
        _despliegaVerClinicas(self, clinicas, '/vistas/Clinica/verClinicas.html')
class EditaClinica(webapp2.RequestHandler):
    """Edit form for an existing clinic (loaded by datastore key)."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinica = db.get(self.request.get('key'))
        _despliegaEditaClinica(self, clinica, '/vistas/Clinica/editaClinica.html')
#=======================================End of clinic handlers
#=======================================Group handlers
class AgregarGrupo(webapp2.RequestHandler):
    """Form to add a group to the clinic given by 'key'."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarGrupo(self,self.request.get('key'), '/vistas/Grupo/agregarGrupo.html')
class GrabarGrupo(webapp2.RequestHandler):
    """Create or update a Grupo belonging to a clinic."""
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        key = self.request.get('key')
        clinica = self.request.get('clinica')
        nombre = self.request.get('nombre')
        descripcion = self.request.get('descripcion')
        inicioAgenda = self.request.get('inicioAgenda')
        finAgenda = self.request.get('finAgenda')
        fa = self.request.get('fa')
        # Empty/missing key means "create"; otherwise update in place.
        if(key == None or key == ""):
            grabaGrupo(clinica,nombre,descripcion,inicioAgenda,finAgenda,fa)
        else:
            actualizaGrupo(key,nombre,descripcion,inicioAgenda,finAgenda,fa)
        self.redirect('/verGrupos?key='+clinica) # back to this clinic's group list
class EliminarGrupo(webapp2.RequestHandler):
    """Delete the group given by 'key' and return to its clinic's list."""
    def get(self):
        key = self.request.get('key')
        eliminaGrupo(key)
        self.redirect('/verGrupos?key='+self.request.get('clinica')) # back to the group list
class VerGrupos(webapp2.RequestHandler):
    """List the groups of one clinic ('key' = clinic datastore key)."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinica = getObject(self.request.get('key'))
        _despliegaVerGrupos(self,clinica, getGrupos(clinica.key()), '/vistas/Grupo/verGrupos.html')
class EditarGrupo(webapp2.RequestHandler):
    """Edit form for an existing group."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        grupo = db.get(self.request.get('key'))
        clinica = self.request.get('clinica')
        _despliegaEditaGrupo(self, clinica, grupo, '/vistas/Grupo/editaGrupo.html')
#=======================================End of group handlers
#=======================================Group-assignment handlers
class UsuariosAsignacion(webapp2.RequestHandler):
    """Assignment wizard, step 1: pick a user."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        usuarios = getAllUsuarios()
        _despliegaUsuariosAsignacion(self,usuarios,'/vistas/Asignacion/verUsuarios.html')
class ClinicasAsignacion(webapp2.RequestHandler):
    """Assignment wizard, step 2: pick a clinic for the chosen user."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        clinicas = getAllClinicas()
        usuario = getObject(self.request.get('usuario'))
        _despliegaClinicasAsignacion(self,usuario,clinicas,'/vistas/Asignacion/verClinicas.html')
class GruposAsignacion(webapp2.RequestHandler):
    """Assignment wizard, step 3: pick a group inside the chosen clinic."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        clinica = getObject(self.request.get('clinica'))
        usuario = getObject(self.request.get('usuario'))
        _despliegaGruposAsignacion(self,usuario,clinica,'/vistas/Asignacion/verGrupos.html')
class GuardaAsignacion(webapp2.RequestHandler):
    """Assignment wizard, final step: persist the user-group link."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        grupo = self.request.get('grupo')
        usuario = self.request.get('usuario')
        # Create the association between both entities
        creaAsignacion(usuario,grupo)
        _despliegaExito(self,"Usuario Asignado Correctamente",'/asignaUsuarios1','/vistas/Exito.html')
#=======================================End of group-assignment handlers
class AgregarHorario(webapp2.RequestHandler):
    """Form to add a schedule slot to the group given by 'key'."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarHorario(self,self.request.get('key'), '/vistas/Horario/agregarHorario.html')
class GrabarHorario(webapp2.RequestHandler):
    """Create or update a Horario (schedule slot) for a group.

    An empty/missing 'key' means create; otherwise update in place.
    """
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        key = self.request.get('key')
        grupo = self.request.get('grupo')
        descripcion = self.request.get('descripcion')
        dia = self.request.get('dia')
        horaInicio = self.request.get('horaInicio')
        horaFin = self.request.get('horaFin')
        if(key == None or key == ""):
            grabaHorario(grupo,descripcion,dia,horaInicio,horaFin)
        else:
            # BUG FIX: the update branch previously called actualizaGrupo
            # (copy-paste from GrabarGrupo) with a Horario argument list,
            # so editing a schedule corrupted/failed the update. Update the
            # Horario entity instead.
            # TODO(review): confirm the helper name in modelo.Layer.
            actualizaHorario(key,descripcion,dia,horaInicio,horaFin)
        self.redirect('/verHorarios?key='+grupo) # back to this group's schedule list
class EliminarHorario(webapp2.RequestHandler):
    """Delete the schedule slot given by 'key'."""
    def get(self):
        key = self.request.get('key')
        eliminaHorario(key)
        self.redirect('/verHorarios?key='+self.request.get('grupo')) # back to the schedule list
class VerHorarios(webapp2.RequestHandler):
    """List the schedule slots of one group ('key' = group key)."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        #horarios = getAllHorarios(self.request.get('key'))
        grupo = getObject(self.request.get('key'))
        _despliegaVerHorarios(self,grupo, getHorarios(grupo), '/vistas/Horario/verHorarios.html')
class EditarHorario(webapp2.RequestHandler):
    """Edit form for an existing schedule slot."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        horario = db.get(self.request.get('key'))
        grupo = self.request.get('grupo')
        _despliegaEditaHorario(self, grupo, horario, '/vistas/Horario/editaHorario.html')
#=======================================End of schedule handlers
class EliminaUsuario(webapp2.RequestHandler):
    """Delete the user given by 'key'."""
    def get(self):
        usuarioKey = self.request.get('key')
        deleteUsuario(usuarioKey)
        self.redirect('/verUsuarios')
class EditaUsuario(webapp2.RequestHandler):
    """Edit form for an existing user."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        usuarioKey = self.request.get('key')
        usuario = getUsuario(usuarioKey);
        _despliegaEditaUsuario(self, usuario, '/vistas/editaUsuario.html')
class GuardaCambiosUsuario(webapp2.RequestHandler):
    """Persist edits to an existing user (password is not changed here)."""
    def post(self):
        usuarioKey = self.request.get('usuarioKey')
        nombre = self.request.get('nombre')
        matricula = self.request.get('matricula')
        apellidop = self.request.get('apellidop')
        apellidom = self.request.get('apellidom')
        tipo = self.request.get('tipo')
        usuario = getUsuario(usuarioKey);
        updateUsuario(usuario,nombre,matricula,apellidop,apellidom,tipo)
        self.redirect('/verUsuarios')
#====================================Agenda (appointment) handlers
class AgendaPacienteExample(webapp2.RequestHandler):
    """Debug endpoint: show the availability count for a schedule slot."""
    def get(self):
        horario = self.request.get('horario')
        disponible = verificaDisponibilidadExample(horario)
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write('Total:<br/>')
        self.response.out.write(disponible)
class AgendaPaciente(webapp2.RequestHandler):
    """Book an appointment for the logged-in user in a schedule slot."""
    def post(self):
        horario = self.request.get('horario')
        descripcion = self.request.get('descripcion')
        folio = self.request.get('folio')
        # session layout: [nombre, matricula, tipo, key] -- index 3 is the user key
        usuario = env.globals.get('session')[3]
        # disponible[0] = patient count, disponible[1] = whether booking succeeded
        disponible = verificaDisponibilidad(horario,usuario,descripcion,folio)
        self.response.headers['Content-Type'] = 'text/html'
        if (disponible[1] == True):
            _despliegaExito(self,"El usuario ha agendado correctamente (No."+str(disponible[0])+")",'/verHorariosUsuario','/vistas/Exito.html')
        else:
            _despliegaError(self,"Agenda Llena ("+str(disponible[0])+" Pacientes), no es posible agendar",'/verHorariosUsuario','/vistas/Error.html')
class VerFormaCita(webapp2.RequestHandler):
    """Render the appointment form for a schedule slot."""
    def get(self):
        horario = self.request.get('horario')
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaFormaCita(self,horario,'/vistas/Alumno/agendaForma.html')
class VerGruposUsuario(webapp2.RequestHandler):
    """List the groups the logged-in user belongs to."""
    def get(self):
        # session layout: [nombre, matricula, tipo, key]
        k=env.globals.get('session')
        key = k[3]
        usuario = db.get(key)
        grupos = usuario.grupos
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaGruposUsuario(self,usuario,grupos, '/vistas/Alumno/verGrupos.html')
class VerHorariosUsuario(webapp2.RequestHandler):
    """List bookable schedule slots for the logged-in user."""
    def get(self):
        usuario = env.globals.get('session')[3]
        horarios = getAgendaValida(usuario)
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaHorariosUsuario(self,horarios, '/vistas/Alumno/verHorarios.html')
#===================================End of agenda handlers
#=======================================Period handlers
class AgregarPeriodo(webapp2.RequestHandler):
    """Form to register a new academic period."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarPeriodo(self, '/vistas/Periodo/agregarPeriodo.html')
class GrabarPeriodo(webapp2.RequestHandler):
    """Persist a new Periodo from the submitted form, then show the list."""
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        req = self.request
        descripcion = req.get('descripcion')
        # 'actual' == '1' marks this period as the current one; any
        # previously-current period has its flag cleared first.
        esActual = req.get('actual') == '1'
        if esActual:
            quitaActual()
        fi = to_datetime(req.get('fechaInicio'))
        ff = to_datetime(req.get('fechaFin'))
        grabaPeriodo(descripcion, fi, ff, esActual)
        # Back to the period list
        self.redirect('/verPeriodo')
class EliminarPeriodo(webapp2.RequestHandler):
    """Delete the period given by 'key'."""
    def get(self):
        key = self.request.get('key')
        deletePeriodo(key)
        self.redirect('/verPeriodo') # back to the period list
class VerPeriodo(webapp2.RequestHandler):
    """List every registered period."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        #horarios = getAllHorarios(self.request.get('key'))
        periodos = getAllPeriodos()
        _despliegaVerPeriodo(self,periodos, '/vistas/Periodo/verPeriodo.html')
class EditarPeriodo(webapp2.RequestHandler):
    """Edit form for an existing period."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        periodoKey = self.request.get('key')
        periodo = getPeriodo(periodoKey)
        _despliegaEditaPeriodo(self, periodo, '/vistas/Periodo/editaPeriodo.html')
class GrabarCambiosPeriodo(webapp2.RequestHandler):
    """Persist edits to an existing period."""
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        descripcion = self.request.get('descripcion')
        fechaInicio = self.request.get('fechaInicio')
        fechaFin = self.request.get('fechaFin')
        actual = self.request.get('actual')
        # Only one period may be current: clear the old flag first.
        if actual == '1':
            esActual = True
            quitaActual()
        else:
            esActual = False
        fi = to_datetime(fechaInicio)
        ff = to_datetime(fechaFin)
        periodoKey = self.request.get('key')
        periodo = getPeriodo(periodoKey)
        updatePeriodo(periodo,descripcion,fi,ff,esActual)
        self.redirect('/verPeriodo') # back to the period list
"""
Views
"""
def _despliegaLogin(self, templateFile):
    """Render the login form."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({}))
def _despliegaRegistraCita(self, templateFile):
    """Render the appointment-registration view (no context)."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({}))
def _despliegaFormaCita(self,horario, templateFile):
    """Render the appointment form for one schedule slot."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({'horario':horario}))
def _despliegaVerUsuarios(self, usuarios, templateFile):
    """Render the user list."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({'usuarios': usuarios }))
def _despliegaBienvenida(self, templateFile):
    """Render the post-login welcome screen."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({}))
def _despliegaRegistroAlumno(self, clinicas, templateFile):
    """Render the student-registration form with the clinic choices."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({'clinicas': clinicas }))
def _despliegaRegistraUsuario(self, templateFile):
    """Render the user-registration form."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({}))
def _despliegaAgregaHorarioClinica(self, clinicas, templateFile):
    """Render the add-schedule-to-clinic form."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({'clinicas': clinicas }))
def _despliegaMostrarHorariosClinica(self, horarios,clinica, templateFile):
    """Render a clinic's schedule list."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({'horarios': horarios,'clinica':clinica }))
def _despliegaAgregarClinica(self, templateFile):
    """Render the add-clinic form."""
    template = env.get_template(templateFile)
    self.response.out.write(template.render({}))
"""
Despliega la vista para agregar un grupo nuevo
"""
def _despliegaAgregarGrupo(self,clinica, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'clinica':clinica}))
def _despliegaAgregarHorario(self,grupo, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo':grupo}))
def _despliegaVerClinicas(self, clinicas, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'clinicas': clinicas }))
def _despliegaEditaUsuario(self, usuario, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuario': usuario }))
"""
Vista de Grupos de una Clinica en Especial
"""
def _despliegaVerGrupos(self, clinica, grupos, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupos': grupos,'clinica':clinica}))
"""
Vista de Grupos de una Clinica en Especial
"""
def _despliegaVerHorarios(self, grupo, horarios, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo': grupo,'horarios':horarios}))
"""
Vista para editar Un grupo en especial
"""
def _despliegaEditaGrupo(self,clinica,grupo, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo':grupo,'clinica':clinica}))
"""
Vista para ver usuarios del sistema
"""
def _despliegaUsuariosAsignacion(self,usuarios, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuarios':usuarios}))
"""
Vista para ver clinicas para asignar
"""
def _despliegaClinicasAsignacion(self,usuario,clinicas,templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuario':usuario,'clinicas':clinicas}))
"""
Vista para ver grupos a asignar
"""
def _despliegaGruposAsignacion(self,usuario,clinica,templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuario':usuario,'clinica':clinica}))
"""
Despliega un mensaje de Exito y la liga de retorno
"""
def _despliegaExito(self,mensaje,liga,templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'mensaje':mensaje,'liga':liga}))
def _despliegaError(self,mensaje,liga,templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'mensaje':mensaje,'liga':liga}))
"""
Vista para editar Un horario
"""
def _despliegaEditaHorario(self,grupo, horario, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo':grupo,'horario':horario}))
"""
Vista de los Grupos a los que pertenece un usuario
"""
def _despliegaGruposUsuario(self,usuario,grupos, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupos':grupos,'usuario':usuario}))
def _despliegaEditaClinica(self, clinica, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'clinica': clinica }))
def _despliegaHorariosUsuario(self, horarios, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'horarios': horarios }))
"""
Vistas para manejo de periodos
"""
def _despliegaAgregarPeriodo(self, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({}))
def _despliegaVerPeriodo(self,periodos, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'periodos':periodos}))
def _despliegaEditaPeriodo(self, periodo, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'periodo':periodo}))
# URL routing table. webapp2 matches routes in order and uses the first hit,
# so the duplicate entries the original list carried for '/grabaClinica',
# '/verClinicas', '/agregarClinica', '/agregarHorario' and '/cerrarSesion'
# were dead; they are removed here (first occurrence kept, order preserved).
app = webapp2.WSGIApplication([('/', MainPage),
                               ('/iniciaSesion', IniciaSesion),
                               ('/bienvenida', Bienvenida),
                               # Users
                               ('/verUsuarios', VerUsuarios),
                               ('/registroAlumno', RegistroAlumno),
                               ('/grabaAlumno', GrabaAlumno),
                               ('/registraUsuario', RegistraUsuario),
                               ('/grabaUsuario', GrabaUsuario),
                               ('/eliminaUsuario', EliminaUsuario),
                               ('/editaUsuario', EditaUsuario),
                               ('/guardaCambiosUsuario', GuardaCambiosUsuario),
                               # Clinics
                               ('/verClinicas', VerClinicas),
                               ('/agregarClinica', AgregarClinica),
                               ('/agregaHorarioClinica', AgregaHorarioClinica),
                               ('/grabaClinica', GrabaClinica),
                               ('/editaClinica', EditaClinica),
                               ('/eliminaClinica', EliminaClinica),
                               # Groups
                               ('/verGrupos', VerGrupos),
                               ('/grabarGrupo', GrabarGrupo),
                               ('/eliminarGrupo', EliminarGrupo),
                               ('/agregarGrupo', AgregarGrupo),
                               ('/editarGrupo', EditarGrupo),
                               # Schedules
                               ('/verHorarios', VerHorarios),
                               ('/grabarHorario', GrabarHorario),
                               ('/eliminarHorario', EliminarHorario),
                               ('/agregarHorario', AgregarHorario),
                               ('/editarHorario', EditarHorario),
                               # Periods
                               ('/agregarPeriodo', AgregarPeriodo),
                               ('/grabarPeriodo', GrabarPeriodo),
                               ('/verPeriodo', VerPeriodo),
                               ('/editarPeriodo', EditarPeriodo),
                               ('/eliminarPeriodo', EliminarPeriodo),
                               ('/grabarCambiosPeriodo', GrabarCambiosPeriodo),
                               # Group assignment wizard
                               ('/asignaUsuarios1', UsuariosAsignacion),
                               ('/asignaUsuarios2', ClinicasAsignacion),
                               ('/asignaUsuarios3', GruposAsignacion),
                               ('/guardaAsignacion', GuardaAsignacion),
                               # Agenda (appointments)
                               ('/agendaPaciente',AgendaPaciente),
                               ('/agendaPacienteExample',AgendaPacienteExample),
                               ('/verGruposUsuario',VerGruposUsuario),
                               ('/verHorariosUsuario',VerHorariosUsuario),
                               ('/verFormaCita',VerFormaCita),
                               # Session
                               ('/cerrarSesion', CerrarSesion)], debug=True)
|
normal
|
{
"blob_id": "51cb750082ce93b6d14fe3aa40711836d493129c",
"index": 3692,
"step-1": "\"\"\"\r\nProyecto SA^3\r\nAutor: \tMario Lopez\r\n Luis Aviles\r\n\t\tJoaquin V\r\nFecha: Octubre del 2012\r\nversión: 1\r\n\"\"\"\r\n\r\n#Manejo de temlates en el HTML\r\nimport jinja2 \r\nfrom jinja2 import Environment, PackageLoader\r\n\r\nimport os\r\nimport cgi\r\nimport datetime\r\nimport urllib\r\n# for hashing\r\nimport hashlib\r\n#Layer de comunicacion con Modelo\r\nfrom modelo.Layer import *\r\nfrom modelo.Layer2 import *\r\n#Framework de Web para Python\r\nimport webapp2\r\n \r\n# API DataStore\r\nfrom google.appengine.ext import db\r\n\r\n# intitalization of template system. It says that HTML templates will\r\n# be found in current directory (\"__file__\")\r\n# variable env para sesiones\r\nenv = Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\r\nenv.filters['format_time'] = format_time\n\r\n# Método para verificar si hay una sesión activa\r\ndef before_filter(fn):\r\n\tdef inner_function(self):\r\n\t\tif not 'session' in env.globals:\r\n\t\t\tself.redirect('/')\r\n\t\treturn fn(self)\r\n\treturn inner_function\r\n\r\n\"\"\"\r\nREQUEST HANDLERS\r\n\"\"\"\r\n\r\nclass MainPage(webapp2.RequestHandler):\r\n \"\"\"Pantalla inicial. 
Despliega una forma para iniciar sesión\r\n \"\"\"\r\n \r\n def get(self):\r\n\r\n self.response.headers['Content-Type'] = 'text/html'\r\n \r\n # Generar el admin\r\n matricula = 'admin'\r\n password = 'admin'\r\n nombre = 'admin'\r\n apellidop = 'admin'\r\n apellidom = 'admin'\r\n tipo = 'admin'\r\n # Generar password\r\n h = hashlib.new('ripemd160')\r\n h.update(password)\r\n md5 = h.hexdigest()\r\n password = md5\r\n\r\n #Usuario(matricula = matricula, password = password, nombre = nombre, apellidop = apellidop, apellidom = apellidom, tipo = tipo).put()\r\n \r\n #productos = db.GqlQuery(\"SELECT * FROM Inventario\")\r\n \r\n #Desplegar lista de productos\r\n _despliegaLogin(self, '/vistas/login.html')\r\n\t\t\t\r\n\r\nclass VerUsuarios(webapp2.RequestHandler):\r\n\t\"\"\" Despliega los usuarios registrados\r\n\t\"\"\"\r\n\t\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\t\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t\t\t\r\n\t\tusuarios = db.GqlQuery(\"SELECT * FROM Usuario\")\r\n\t\t\r\n\t\t_despliegaVerUsuarios(self, usuarios, '/vistas/verUsuarios.html')\r\n\r\nclass RegistroAlumno(webapp2.RequestHandler):\r\n\t\"\"\" Formulario para registrar usuarios\r\n\t\"\"\"\r\n\t\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t\r\n\t\tclinicas = db.GqlQuery(\"SELECT * FROM Clinica\")\r\n\t\t\r\n\t\t_despliegaRegistroAlumno(self, clinicas, '/vistas/registroAlumno.html')\r\n\r\nclass GrabaAlumno(webapp2.RequestHandler):\r\n\t\r\n\tdef post(self):\r\n\t\tnombre = self.request.get('nombre')\r\n\t\tmatricula = self.request.get('matricula')\r\n\t\tpassword = self.request.get('password')\r\n\t\t\r\n\t\t# Generar password\r\n\t\th = hashlib.new('ripemd160')\r\n\t\th.update(password)\r\n\t\tmd5 = h.hexdigest()\r\n\t\t\r\n\t\tpassword = md5\r\n\r\nclass RegistraUsuario(webapp2.RequestHandler):\r\n\t\"\"\" Formulario para registrar usuarios\r\n\t\"\"\"\r\n\t\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] 
= 'text/html'\r\n\t\t_despliegaRegistraUsuario(self, '/vistas/registraUsuario.html')\r\n\r\nclass GrabaUsuario(webapp2.RequestHandler):\r\n\t\r\n\tdef post(self):\r\n\t\tnombre = self.request.get('nombre')\r\n\t\tmatricula = self.request.get('matricula')\r\n\t\tpassword = self.request.get('password')\r\n\t\tapellidop = self.request.get('apellidop')\r\n\t\tapellidom = self.request.get('apellidom')\r\n\t\ttipo = self.request.get('tipo')\r\n\t\t\r\n\t\t# Generar password\r\n\t\th = hashlib.new('ripemd160')\r\n\t\th.update(password)\r\n\t\tmd5 = h.hexdigest()\r\n\t\t\r\n\t\tpassword = md5\r\n\t\t\r\n\t\tgrabaUsuario(matricula,password,nombre,apellidop,apellidom,tipo)\r\n\t\tself.redirect('/verUsuarios')\r\n\r\nclass IniciaSesion(webapp2.RequestHandler):\r\n\t\"\"\" Entrada: al dar click en iniciar sesión en la pantalla principal\r\n\t\tSalida: se crea la sesión del usuario y lo redirige a....\r\n\t\"\"\"\r\n\t\r\n\tdef post(self):\t\t\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tmatricula = self.request.get('matricula')\r\n\t\tpassword = self.request.get('password')\r\n\t\t\r\n\t\th = hashlib.new('ripemd160')\r\n\t\th.update(password)\r\n\t\tmd5 = h.hexdigest()\r\n\t\tpassword = md5\r\n\t\t\r\n\t\tuser = db.GqlQuery(\"SELECT * FROM Usuario WHERE matricula = '\" + matricula + \"' AND password = '\" + password + \"'\")\r\n\t\t\r\n\t\tif user.count() == 1:\r\n\t\t\tfor u in user:\r\n\t\t\t\tuser = []\r\n\t\t\t\tuser.append(u.nombre)\r\n\t\t\t\tuser.append(u.matricula)\r\n\t\t\t\tuser.append(u.tipo)\r\n\t\t\t\tuser.append(u.key())\n\t\t\t\tenv.globals['session'] = user\r\n\t\t\t\tself.redirect('/bienvenida')\r\n\t\telse:\r\n\t\t\tself.redirect('/')\r\n\r\nclass CerrarSesion(webapp2.RequestHandler):\r\n\t\"\"\" Entrada: al dar click en cerrar sesión\r\n\t\tSalida: se elimina la sesión actual y se\r\n\t\tredirige a la pantalla para iniciar sesión\r\n\t\"\"\"\r\n\t\r\n\tdef get(self):\r\n\t\tdel 
env.globals['session']\r\n\t\tself.redirect('/')\r\n\r\nclass Bienvenida(webapp2.RequestHandler):\r\n\t\"\"\"\tPantalla que se muestra al iniciar sesion\r\n\t\"\"\"\r\n\t\r\n\t@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t_despliegaBienvenida(self, '/vistas/bienvenida.html')\r\n\r\nclass AgregaHorarioClinica(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tclinicas = getAllClinicas()\r\n\t\t_despliegaAgregaHorarioClinica(self,clinicas, '/vistas/agregarHorarioClinica.html')\r\n\r\n\r\n#=======================================Funciones de Clinica\r\nclass AgregarClinica(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t_despliegaAgregarClinica(self, '/vistas/Clinica/agregarClinica.html')\r\n\r\nclass GrabaClinica(webapp2.RequestHandler):\r\n\tdef post(self):\r\n\t\tkey = self.request.get('key')\r\n\t\tnombre = self.request.get('nombre')\r\n\t\tdescripcion = self.request.get('descripcion')\r\n\t\tlocalizacion = self.request.get('localizacion')\r\n\t\tunidades = int(self.request.get('unidades'))\r\n\t\tdefectuosas = int(self.request.get('defectuosas'))\r\n\t\tif(key == None or key ==\"\"):\r\n\t\t\tgrabaClinica(nombre,descripcion,localizacion,unidades,defectuosas)\r\n\t\telse:\r\n\t\t\tactualizaClinica(key,nombre,descripcion,localizacion,unidades,defectuosas)\r\n\t\tself.redirect('/verClinicas') #Redireccion a la vista de clinicas\r\n\r\nclass EliminaClinica(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tkey = self.request.get('key')\r\n\t\teliminaClinica(key)\r\n\t\tself.redirect('/verClinicas') #Redireccion a las clinicas\r\n\nclass VerClinicas(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\ttime.sleep(.1)\n\t\tclinicas = getAllClinicas()\r\n\t\t_despliegaVerClinicas(self, clinicas, 
'/vistas/Clinica/verClinicas.html')\r\n\r\nclass EditaClinica(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tclinica = db.get(self.request.get('key'))\r\n\t\t_despliegaEditaClinica(self, clinica, '/vistas/Clinica/editaClinica.html')\r\n\r\n#=======================================Fin de manejos de Clinicas\r\n#=======================================Inicia Manejo de Grupos\r\nclass AgregarGrupo(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t_despliegaAgregarGrupo(self,self.request.get('key'), '/vistas/Grupo/agregarGrupo.html')\r\n\r\nclass GrabarGrupo(webapp2.RequestHandler):\r\n\tdef post(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tkey = self.request.get('key')\r\n\t\tclinica = self.request.get('clinica')\r\n\t\tnombre = self.request.get('nombre')\r\n\t\tdescripcion = self.request.get('descripcion')\r\n\t\tinicioAgenda = self.request.get('inicioAgenda')\n\t\tfinAgenda = self.request.get('finAgenda')\n\t\tfa = self.request.get('fa')\n\t\tif(key == None or key == \"\"):\r\n\t\t\tgrabaGrupo(clinica,nombre,descripcion,inicioAgenda,finAgenda,fa)\r\n\t\telse:\r\n\t\t\tactualizaGrupo(key,nombre,descripcion,inicioAgenda,finAgenda,fa)\r\n\t\tself.redirect('/verGrupos?key='+clinica) #Redireccion a la vista de Grupos de una Clinica\r\n\r\nclass EliminarGrupo(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tkey = self.request.get('key')\r\n\t\teliminaGrupo(key)\r\n\t\tself.redirect('/verGrupos?key='+self.request.get('clinica')) #Redireccion a la vista de los Grupos\r\n\r\nclass VerGrupos(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tclinica = getObject(self.request.get('key'))\r\n\t\t_despliegaVerGrupos(self,clinica, getGrupos(clinica.key()), '/vistas/Grupo/verGrupos.html')\r\n\r\nclass 
EditarGrupo(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tgrupo = db.get(self.request.get('key'))\r\n\t\tclinica = self.request.get('clinica')\r\n\t\t_despliegaEditaGrupo(self, clinica, grupo, '/vistas/Grupo/editaGrupo.html')\r\n\r\n#=======================================Fin de manejo de Grupos\r\n#=======================================Inicia Manejo de Asignacion de Grupo\r\nclass UsuariosAsignacion(webapp2.RequestHandler):\n\tdef get(self):\n\t\tself.response.headers['Content-Type']= 'text/html'\n\t\tusuarios = getAllUsuarios()\n\t\t_despliegaUsuariosAsignacion(self,usuarios,'/vistas/Asignacion/verUsuarios.html')\n\nclass ClinicasAsignacion(webapp2.RequestHandler):\n\tdef get(self):\n\t\tself.response.headers['Content-Type']= 'text/html'\n\t\tclinicas = getAllClinicas()\n\t\tusuario = getObject(self.request.get('usuario'))\n\t\t_despliegaClinicasAsignacion(self,usuario,clinicas,'/vistas/Asignacion/verClinicas.html')\n\nclass GruposAsignacion(webapp2.RequestHandler):\n\tdef get(self):\n\t\tself.response.headers['Content-Type']= 'text/html'\n\t\tclinica = getObject(self.request.get('clinica'))\n\t\tusuario = getObject(self.request.get('usuario'))\n\t\t_despliegaGruposAsignacion(self,usuario,clinica,'/vistas/Asignacion/verGrupos.html')\n\nclass GuardaAsignacion(webapp2.RequestHandler):\n\tdef get(self):\n\t\tself.response.headers['Content-Type']= 'text/html'\n\t\tgrupo = self.request.get('grupo')\n\t\tusuario = self.request.get('usuario')\n\t\t#Crea la asignacion entre ambos objetos\n\t\tcreaAsignacion(usuario,grupo)\n\t\t_despliegaExito(self,\"Usuario Asignado Correctamente\",'/asignaUsuarios1','/vistas/Exito.html')\n#=======================================Fin de Manejo de Asignacion de Grupo\r\n\nclass AgregarHorario(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 
'text/html'\r\n\t\t_despliegaAgregarHorario(self,self.request.get('key'), '/vistas/Horario/agregarHorario.html')\r\n\r\nclass GrabarHorario(webapp2.RequestHandler):\r\n\tdef post(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tkey = self.request.get('key')\r\n\t\tgrupo = self.request.get('grupo')\r\n\t\tdescripcion = self.request.get('descripcion')\r\n\t\tdia = self.request.get('dia')\r\n\t\thoraInicio = self.request.get('horaInicio')\r\n\t\thoraFin = self.request.get('horaFin')\r\n\t\tif(key == None or key == \"\"):\r\n\t\t\tgrabaHorario(grupo,descripcion,dia,horaInicio,horaFin)\r\n\t\telse:\r\n\t\t\tactualizaGrupo(key,descripcion,dia,horaInicio,horaFin)\r\n\t\tself.redirect('/verHorarios?key='+grupo) #Redireccion a la vista de Grupos de una Clinica\r\n\r\nclass EliminarHorario(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tkey = self.request.get('key')\r\n\t\teliminaHorario(key)\r\n\t\tself.redirect('/verHorarios?key='+self.request.get('grupo')) #Redireccion a la vista de Horarios\r\n\r\nclass VerHorarios(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t#horarios = getAllHorarios(self.request.get('key'))\r\n\t\tgrupo = getObject(self.request.get('key'))\r\n\t\t_despliegaVerHorarios(self,grupo, getHorarios(grupo), '/vistas/Horario/verHorarios.html')\r\n\r\nclass EditarHorario(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\thorario = db.get(self.request.get('key'))\r\n\t\tgrupo = self.request.get('grupo')\r\n\t\t_despliegaEditaHorario(self, grupo, horario, '/vistas/Horario/editaHorario.html')\r\n\r\n#=======================================Fin de manejo de Horario\r\n\r\nclass EliminaUsuario(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tusuarioKey = self.request.get('key')\r\n\t\tdeleteUsuario(usuarioKey)\r\n\t\tself.redirect('/verUsuarios')\r\n\r\nclass 
EditaUsuario(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tusuarioKey = self.request.get('key')\r\n\t\tusuario = getUsuario(usuarioKey);\r\n\t\t_despliegaEditaUsuario(self, usuario, '/vistas/editaUsuario.html')\r\n\nclass GuardaCambiosUsuario(webapp2.RequestHandler):\r\n\tdef post(self):\r\n\t\tusuarioKey = self.request.get('usuarioKey')\r\n\t\tnombre = self.request.get('nombre')\r\n\t\tmatricula = self.request.get('matricula')\r\n\t\tapellidop = self.request.get('apellidop')\r\n\t\tapellidom = self.request.get('apellidom')\r\n\t\ttipo = self.request.get('tipo')\r\n\t\t\r\n\t\tusuario = getUsuario(usuarioKey);\r\n\t\tupdateUsuario(usuario,nombre,matricula,apellidop,apellidom,tipo)\r\n\t\tself.redirect('/verUsuarios')\r\n#====================================Inicia Proceso de Agendas\nclass AgendaPacienteExample(webapp2.RequestHandler):\n\tdef get(self):\n\t\thorario = self.request.get('horario')\n\t\tdisponible = verificaDisponibilidadExample(horario)\n\t\tself.response.headers['Content-Type'] = 'text/html'\n\t\tself.response.out.write('Total:<br/>')\n\t\tself.response.out.write(disponible)\n\nclass AgendaPaciente(webapp2.RequestHandler):\n\tdef post(self):\n\t\thorario = self.request.get('horario')\n\t\tdescripcion = self.request.get('descripcion')\n\t\tfolio = self.request.get('folio')\n\t\tusuario = env.globals.get('session')[3]\n\t\tdisponible = verificaDisponibilidad(horario,usuario,descripcion,folio)\n\t\tself.response.headers['Content-Type'] = 'text/html'\n\t\tif (disponible[1] == True):\n\t\t\t_despliegaExito(self,\"El usuario ha agendado correctamente (No.\"+str(disponible[0])+\")\",'/verHorariosUsuario','/vistas/Exito.html')\n\t\telse:\n\t\t\t_despliegaError(self,\"Agenda Llena (\"+str(disponible[0])+\" Pacientes), no es posible agendar\",'/verHorariosUsuario','/vistas/Error.html')\n\nclass VerFormaCita(webapp2.RequestHandler):\n\tdef get(self):\n\t\thorario = 
self.request.get('horario')\n\t\tself.response.headers['Content-Type'] = 'text/html'\n\t\t_despliegaFormaCita(self,horario,'/vistas/Alumno/agendaForma.html')\n\t\t\n\nclass VerGruposUsuario(webapp2.RequestHandler):\n\tdef get(self):\n\t\tk=env.globals.get('session')\n\t\tkey = k[3]\n\t\tusuario = db.get(key)\n\t\tgrupos = usuario.grupos\n\t\tself.response.headers['Content-Type'] = 'text/html'\n\t\t_despliegaGruposUsuario(self,usuario,grupos, '/vistas/Alumno/verGrupos.html')\r\n\nclass VerHorariosUsuario(webapp2.RequestHandler):\n\tdef get(self):\n\t\tusuario = env.globals.get('session')[3]\n\t\thorarios = getAgendaValida(usuario)\n\t\tself.response.headers['Content-Type'] = 'text/html'\n\t\t_despliegaHorariosUsuario(self,horarios, '/vistas/Alumno/verHorarios.html')\r\n#===================================Finaliza Proceso de agendas\n\r\n#=======================================Inicia Manejo de Periodos\r\nclass AgregarPeriodo(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t_despliegaAgregarPeriodo(self, '/vistas/Periodo/agregarPeriodo.html')\r\n\r\nclass GrabarPeriodo(webapp2.RequestHandler):\r\n\tdef post(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tdescripcion = self.request.get('descripcion')\r\n\t\tfechaInicio = self.request.get('fechaInicio')\r\n\t\tfechaFin = self.request.get('fechaFin')\r\n\t\tactual = self.request.get('actual')\r\n\t\t\r\n\t\tif actual == '1':\r\n\t\t\tesActual = True\r\n\t\t\tquitaActual()\r\n\t\telse:\r\n\t\t\tesActual = False\r\n\t\t\t\t\r\n\t\tfi = to_datetime(fechaInicio)\r\n\t\tff = to_datetime(fechaFin)\r\n\r\n\t\tgrabaPeriodo(descripcion,fi,ff,esActual)\r\n\t\tself.redirect('/verPeriodo') #Redireccion a la vista de Grupos de una Clinica\r\n\r\nclass EliminarPeriodo(webapp2.RequestHandler):\r\n\tdef get(self):\r\n\t\tkey = self.request.get('key')\r\n\t\tdeletePeriodo(key)\r\n\t\tself.redirect('/verPeriodo') #Redireccion a la vista de 
Horarios\r\n\r\nclass VerPeriodo(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\t#horarios = getAllHorarios(self.request.get('key'))\r\n\t\tperiodos = getAllPeriodos()\r\n\t\t_despliegaVerPeriodo(self,periodos, '/vistas/Periodo/verPeriodo.html')\r\n\t\t\r\nclass EditarPeriodo(webapp2.RequestHandler):\r\n\t#@before_filter\r\n\tdef get(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tperiodoKey = self.request.get('key')\r\n\t\tperiodo = getPeriodo(periodoKey)\r\n\t\t_despliegaEditaPeriodo(self, periodo, '/vistas/Periodo/editaPeriodo.html')\r\n\r\nclass GrabarCambiosPeriodo(webapp2.RequestHandler):\r\n\tdef post(self):\r\n\t\tself.response.headers['Content-Type'] = 'text/html'\r\n\t\tdescripcion = self.request.get('descripcion')\r\n\t\tfechaInicio = self.request.get('fechaInicio')\r\n\t\tfechaFin = self.request.get('fechaFin')\r\n\t\tactual = self.request.get('actual')\r\n\t\t\r\n\t\tif actual == '1':\r\n\t\t\tesActual = True\r\n\t\t\tquitaActual()\r\n\t\telse:\r\n\t\t\tesActual = False\r\n\t\t\t\t\r\n\t\tfi = to_datetime(fechaInicio)\r\n\t\tff = to_datetime(fechaFin)\r\n\t\t\r\n\t\tperiodoKey = self.request.get('key')\r\n\t\tperiodo = getPeriodo(periodoKey)\r\n\t\t\r\n\t\tupdatePeriodo(periodo,descripcion,fi,ff,esActual)\r\n\t\tself.redirect('/verPeriodo') #Redireccion a la vista de Grupos de una Clinica\r\n\r\n\"\"\"\r\nViews\r\n\"\"\"\r\n\r\ndef _despliegaLogin(self, templateFile):\r\n template = env.get_template(templateFile)\r\n self.response.out.write(template.render({}))\r\n\r\ndef _despliegaRegistraCita(self, templateFile):\r\n template = env.get_template(templateFile)\r\n self.response.out.write(template.render({}))\r\n\ndef _despliegaFormaCita(self,horario, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'horario':horario}))\r\n\r\ndef _despliegaVerUsuarios(self, usuarios, 
templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'usuarios': usuarios }))\r\n \r\ndef _despliegaBienvenida(self, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({}))\r\n\t\t\r\ndef _despliegaRegistroAlumno(self, clinicas, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'clinicas': clinicas }))\r\n\t\t\r\ndef _despliegaRegistraUsuario(self, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({}))\r\n\r\ndef _despliegaAgregaHorarioClinica(self, clinicas, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'clinicas': clinicas }))\r\n\r\ndef _despliegaMostrarHorariosClinica(self, horarios,clinica, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'horarios': horarios,'clinica':clinica }))\r\n\r\ndef _despliegaAgregarClinica(self, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({}))\r\n\r\n\"\"\"\r\nDespliega la vista para agregar un grupo nuevo\r\n\"\"\"\r\ndef _despliegaAgregarGrupo(self,clinica, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'clinica':clinica}))\r\ndef _despliegaAgregarHorario(self,grupo, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'grupo':grupo}))\r\ndef _despliegaVerClinicas(self, clinicas, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'clinicas': clinicas }))\r\n\t\t\r\ndef _despliegaEditaUsuario(self, usuario, templateFile):\r\n\t\ttemplate = 
env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'usuario': usuario }))\r\n\r\n\"\"\"\r\n Vista de Grupos de una Clinica en Especial\r\n\"\"\"\r\ndef _despliegaVerGrupos(self, clinica, grupos, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'grupos': grupos,'clinica':clinica}))\r\n\r\n\"\"\"\r\n Vista de Grupos de una Clinica en Especial\r\n\"\"\"\r\ndef _despliegaVerHorarios(self, grupo, horarios, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'grupo': grupo,'horarios':horarios}))\r\n\r\n\"\"\"\r\n\tVista para editar Un grupo en especial\r\n\"\"\"\r\ndef _despliegaEditaGrupo(self,clinica,grupo, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'grupo':grupo,'clinica':clinica}))\r\n\r\n\n\"\"\"\r\n\tVista para ver usuarios del sistema\n\"\"\"\r\ndef _despliegaUsuariosAsignacion(self,usuarios, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'usuarios':usuarios}))\r\n\n\"\"\"\r\n\tVista para ver clinicas para asignar\n\"\"\"\r\ndef _despliegaClinicasAsignacion(self,usuario,clinicas,templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'usuario':usuario,'clinicas':clinicas}))\r\n\n\"\"\"\r\n\tVista para ver grupos a asignar\n\"\"\"\r\ndef _despliegaGruposAsignacion(self,usuario,clinica,templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'usuario':usuario,'clinica':clinica}))\r\n\n\"\"\"\r\n\tDespliega un mensaje de Exito y la liga de retorno\n\"\"\"\r\ndef _despliegaExito(self,mensaje,liga,templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'mensaje':mensaje,'liga':liga}))\r\n\ndef 
_despliegaError(self,mensaje,liga,templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'mensaje':mensaje,'liga':liga}))\r\n\"\"\"\r\n\tVista para editar Un horario\n\"\"\"\r\ndef _despliegaEditaHorario(self,grupo, horario, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'grupo':grupo,'horario':horario}))\r\n\"\"\"\r\n\tVista de los Grupos a los que pertenece un usuario\n\"\"\"\r\ndef _despliegaGruposUsuario(self,usuario,grupos, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'grupos':grupos,'usuario':usuario}))\r\n\ndef _despliegaEditaClinica(self, clinica, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'clinica': clinica }))\r\n\ndef _despliegaHorariosUsuario(self, horarios, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'horarios': horarios }))\r\n\r\n\"\"\"\r\n\tVistas para manejo de periodos\r\n\"\"\"\r\ndef _despliegaAgregarPeriodo(self, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({}))\r\n\t\t\r\ndef _despliegaVerPeriodo(self,periodos, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'periodos':periodos}))\r\n\r\ndef _despliegaEditaPeriodo(self, periodo, templateFile):\r\n\t\ttemplate = env.get_template(templateFile)\r\n\t\tself.response.out.write(template.render({'periodo':periodo}))\r\n\r\napp = webapp2.WSGIApplication([('/', MainPage),\r\n ('/iniciaSesion', IniciaSesion),\r\n ('/bienvenida', Bienvenida),\r\n ('/verUsuarios', VerUsuarios),\r\n ('/registroAlumno', RegistroAlumno),\r\n ('/grabaAlumno', GrabaAlumno),\r\n ('/registraUsuario', RegistraUsuario),\r\n ('/grabaUsuario', GrabaUsuario),\r\n ('/verClinicas', 
VerClinicas),\r\n ('/agregarClinica', AgregarClinica),\r\n ('/agregaHorarioClinica', AgregaHorarioClinica),\r\n ('/agregarHorario', AgregarHorario),\r\n #Manejo de Clinicas\r\n ('/grabaClinica', GrabaClinica),\r\n ('/cerrarSesion', CerrarSesion),\r\n ('/grabaClinica', GrabaClinica),\r\n ('/eliminaUsuario', EliminaUsuario),\r\n ('/editaUsuario', EditaUsuario),\r\n ('/editaClinica', EditaClinica),\r\n ('/eliminaClinica', EliminaClinica),\r\n ('/verClinicas', VerClinicas),\r\n ('/agregarClinica', AgregarClinica),\r\n #Fin manejo de Clinica\r\n #Inicio de Manejo de Grupos\r\n ('/verGrupos', VerGrupos),\r\n ('/grabarGrupo', GrabarGrupo),\r\n ('/eliminarGrupo', EliminarGrupo),\r\n ('/agregarGrupo', AgregarGrupo),\r\n ('/editarGrupo', EditarGrupo),\r\n #Fin de manejo de Grupo\r\n #Inicio de Manejo de Horarios\r\n ('/verHorarios', VerHorarios),\r\n ('/grabarHorario', GrabarHorario),\r\n ('/eliminarHorario', EliminarHorario),\r\n ('/agregarHorario', AgregarHorario),\r\n ('/editarHorario', EditarHorario),\r\n #Fin de manejo de Grupo\r\n\t\t\t\t\t\t\t #Inicio de Agregar periodos\r\n\t\t\t\t\t\t\t ('/agregarPeriodo', AgregarPeriodo),\r\n\t\t\t\t\t\t\t ('/grabarPeriodo', GrabarPeriodo),\r\n\t\t\t\t\t\t\t ('/verPeriodo', VerPeriodo),\r\n\t\t\t\t\t\t\t ('/editarPeriodo', EditarPeriodo),\r\n\t\t\t\t\t\t\t ('/eliminarPeriodo', EliminarPeriodo),\r\n\t\t\t\t\t\t\t ('/grabarCambiosPeriodo', GrabarCambiosPeriodo),\r\n\t\t\t\t\t\t\t\t#Finaliza manejo de periodos\n\t\t\t\t\t\t\t\t#Inicia manejo de Asignacion\n ('/asignaUsuarios1', UsuariosAsignacion),\r\n\t ('/asignaUsuarios2', ClinicasAsignacion),\r\n\t\t ('/asignaUsuarios3', GruposAsignacion),\r\n\t\t\t ('/guardaAsignacion', GuardaAsignacion),\r\t\n\t\t\t\t#Finaliza manejo de Asignacion\n\t\t\t\t#Inicia Agenda\n\t\t\t ('/agendaPaciente',AgendaPaciente),\r\n\t\t\t ('/agendaPacienteExample',AgendaPacienteExample),\r\n\t\t\t ('/verGruposUsuario',VerGruposUsuario),\r\n\t\t\t ('/verHorariosUsuario',VerHorariosUsuario),\r\n\t\t\t 
('/verFormaCita',VerFormaCita),\r\n\t\t\t\t#Finaliza Agenda\n ('/cerrarSesion', CerrarSesion),\n\t\t\t\t('/guardaCambiosUsuario', GuardaCambiosUsuario)], debug=True)\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def presses(phrase):
    """Return the total number of keypresses needed to type *phrase* on a
    classic multi-tap phone keypad.

    Each key cycles through its characters, so a character's cost is its
    1-based position on its key (e.g. 'A' = 1 press, 'C' = 3 presses).
    Lookup is case-insensitive; characters not on the keypad cost 0.
    """
    keyboard = ['1', 'ABC2', 'DEF3', 'GHI4', 'JKL5', 'MNO6', 'PQRS7',
                'TUV8', 'WXYZ9', '*', ' 0', '#']
    amount = 0
    for lttr in phrase.upper():
        for key in keyboard:
            # str.find avoids exception-driven control flow; every keypad
            # character appears on exactly one key, so we can stop early.
            i = key.find(lttr)
            if i != -1:
                amount += i + 1
                break
    return amount
|
normal
|
{
"blob_id": "c2e9a93861080be616b6d833a9343f1a2f018a0b",
"index": 5039,
"step-1": "<mask token>\n",
"step-2": "def presses(phrase):\n keyboard = ['1', 'ABC2', 'DEF3', 'GHI4', 'JKL5', 'MNO6', 'PQRS7',\n 'TUV8', 'WXYZ9', '*', ' 0', '#']\n amount = 0\n for lttr in phrase.upper():\n for key in keyboard:\n try:\n i = key.index(lttr)\n i += 1\n amount += i\n except ValueError:\n pass\n return amount\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/env python3
import numpy as np
import os
import random
import pandas as pd
def read_chunk(reader, chunk_size):
    """Pull *chunk_size* records from *reader* and collate them by field.

    Each call to ``reader.read_next()`` is expected to yield a dict; values
    are grouped per key across all records.  The 'input', 'masking',
    'timestamp' and 'label' fields are then converted to numpy arrays.
    Returns the collated dict.
    """
    collated = {}
    for _ in range(chunk_size):
        record = reader.read_next()
        for key, value in record.items():
            collated.setdefault(key, []).append(value)
    # Only these four fields are promoted to arrays; any extra keys stay lists.
    for field in ('input', 'masking', 'timestamp', 'label'):
        collated[field] = np.array(collated[field])
    return collated
|
normal
|
{
"blob_id": "dc28c3426f47bef8b691a06d54713bc68696ee44",
"index": 8309,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_chunk(reader, chunk_size):\n data = {}\n for i in range(chunk_size):\n ret = reader.read_next()\n for k, v in ret.items():\n if k not in data:\n data[k] = []\n data[k].append(v)\n data['input'] = np.array(data['input'])\n data['masking'] = np.array(data['masking'])\n data['timestamp'] = np.array(data['timestamp'])\n data['label'] = np.array(data['label'])\n return data\n",
"step-3": "import numpy as np\nimport os\nimport random\nimport pandas as pd\n\n\ndef read_chunk(reader, chunk_size):\n data = {}\n for i in range(chunk_size):\n ret = reader.read_next()\n for k, v in ret.items():\n if k not in data:\n data[k] = []\n data[k].append(v)\n data['input'] = np.array(data['input'])\n data['masking'] = np.array(data['masking'])\n data['timestamp'] = np.array(data['timestamp'])\n data['label'] = np.array(data['label'])\n return data\n",
"step-4": "#!/usr/bin/env python3\n\nimport numpy as np\nimport os\nimport random\nimport pandas as pd\n\ndef read_chunk(reader, chunk_size):\n\n data = {}\n for i in range(chunk_size):\n ret = reader.read_next()\n for k, v in ret.items():\n if k not in data:\n data[k] = []\n data[k].append(v)\n data['input'] = np.array(data['input'])\n data['masking'] = np.array(data['masking'])\n data['timestamp'] = np.array(data['timestamp'])\n data['label'] = np.array(data['label'])\n return data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Athena jobOptions fragment: `include` is presumably the framework-provided
# jobOption loader — NOTE(review): runnable only inside the Athena job
# configuration environment, confirm before reuse.
include ("RecExRecoTest/RecExRecoTest_RTT_common.py")


# Turn off the b-tagging flag for this reconstruction test job.
from BTagging.BTaggingFlags import BTaggingFlags
BTaggingFlags.Active=False

# main jobOption: RDO -> ESD reconstruction with trigger disabled
include ("RecExCommon/rdotoesdnotrigger.py")


include ("RecExRecoTest/RecExRecoTest_RTT_common_postOptions.py")
|
normal
|
{
"blob_id": "34c91d273648ae72731fba7f5519a4920d77c0c3",
"index": 7192,
"step-1": "<mask token>\n",
"step-2": "include('RecExRecoTest/RecExRecoTest_RTT_common.py')\n<mask token>\ninclude('RecExCommon/rdotoesdnotrigger.py')\ninclude('RecExRecoTest/RecExRecoTest_RTT_common_postOptions.py')\n",
"step-3": "include('RecExRecoTest/RecExRecoTest_RTT_common.py')\n<mask token>\nBTaggingFlags.Active = False\ninclude('RecExCommon/rdotoesdnotrigger.py')\ninclude('RecExRecoTest/RecExRecoTest_RTT_common_postOptions.py')\n",
"step-4": "include('RecExRecoTest/RecExRecoTest_RTT_common.py')\nfrom BTagging.BTaggingFlags import BTaggingFlags\nBTaggingFlags.Active = False\ninclude('RecExCommon/rdotoesdnotrigger.py')\ninclude('RecExRecoTest/RecExRecoTest_RTT_common_postOptions.py')\n",
"step-5": "include (\"RecExRecoTest/RecExRecoTest_RTT_common.py\")\n\n\nfrom BTagging.BTaggingFlags import BTaggingFlags\nBTaggingFlags.Active=False\n\n# main jobOption\ninclude (\"RecExCommon/rdotoesdnotrigger.py\")\n\n\ninclude (\"RecExRecoTest/RecExRecoTest_RTT_common_postOptions.py\")\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def MainRun():
Cmd()
Test.TestGo()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def MainRun():
Cmd()
Test.TestGo()
def Cmd():
if len(sys.argv) != 3:
print('error cmdargument count!')
return
cmd = sys.argv[1]
if cmd != '-serverid':
print('error cmdargument!')
return
cmdvalue = sys.argv[2]
if not cmdvalue.isdigit():
print('error cmdargument type!')
return
GeneralSet.gServerId = int(cmdvalue)
print(GeneralSet.gServerId)
<|reserved_special_token_1|>
import sys
import Common.Common.GeneralSet as GeneralSet
import TestExample.Test as Test
from Common.Common.ProcessDefine import *
def MainRun():
Cmd()
Test.TestGo()
def Cmd():
if len(sys.argv) != 3:
print('error cmdargument count!')
return
cmd = sys.argv[1]
if cmd != '-serverid':
print('error cmdargument!')
return
cmdvalue = sys.argv[2]
if not cmdvalue.isdigit():
print('error cmdargument type!')
return
GeneralSet.gServerId = int(cmdvalue)
print(GeneralSet.gServerId)
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import Common.Common.GeneralSet as GeneralSet
import TestExample.Test as Test
from Common.Common.ProcessDefine import *
def MainRun():
    """Entry point: parse the command-line arguments, then run the tests.

    Cmd() sets the global server id from argv; Test.TestGo() starts the
    test run (defined in the project's TestExample.Test module).
    """
    Cmd()
    Test.TestGo()
def Cmd():
if (len(sys.argv) != 3):
print('error cmdargument count!')
return
cmd = sys.argv[1]
if cmd != '-serverid':
print('error cmdargument!')
return
cmdvalue = sys.argv[2]
if not cmdvalue.isdigit():
print('error cmdargument type!')
return
GeneralSet.gServerId = int(cmdvalue)
print(GeneralSet.gServerId)
|
flexible
|
{
"blob_id": "734561c2f127418bdc612f84b3b1ba125b6a2723",
"index": 3784,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef MainRun():\n Cmd()\n Test.TestGo()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef MainRun():\n Cmd()\n Test.TestGo()\n\n\ndef Cmd():\n if len(sys.argv) != 3:\n print('error cmdargument count!')\n return\n cmd = sys.argv[1]\n if cmd != '-serverid':\n print('error cmdargument!')\n return\n cmdvalue = sys.argv[2]\n if not cmdvalue.isdigit():\n print('error cmdargument type!')\n return\n GeneralSet.gServerId = int(cmdvalue)\n print(GeneralSet.gServerId)\n",
"step-4": "import sys\nimport Common.Common.GeneralSet as GeneralSet\nimport TestExample.Test as Test\nfrom Common.Common.ProcessDefine import *\n\n\ndef MainRun():\n Cmd()\n Test.TestGo()\n\n\ndef Cmd():\n if len(sys.argv) != 3:\n print('error cmdargument count!')\n return\n cmd = sys.argv[1]\n if cmd != '-serverid':\n print('error cmdargument!')\n return\n cmdvalue = sys.argv[2]\n if not cmdvalue.isdigit():\n print('error cmdargument type!')\n return\n GeneralSet.gServerId = int(cmdvalue)\n print(GeneralSet.gServerId)\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport Common.Common.GeneralSet as GeneralSet\nimport TestExample.Test as Test\nfrom Common.Common.ProcessDefine import *\n \ndef MainRun():\n Cmd()\n Test.TestGo()\n \ndef Cmd():\n if (len(sys.argv) != 3):\n print('error cmdargument count!')\n return\n\n cmd = sys.argv[1]\n if cmd != '-serverid':\n print('error cmdargument!')\n return\n cmdvalue = sys.argv[2]\n if not cmdvalue.isdigit():\n print('error cmdargument type!')\n return\n GeneralSet.gServerId = int(cmdvalue)\n print(GeneralSet.gServerId)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for cnt in range(1, T + 1):
S = input()
S_list = []
card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}
print('#' + str(cnt), end=' ')
for i in range(0, len(S), 3):
S_list.append(S[i:i + 3])
if len(set(S_list)) != len(S_list):
print('ERROR')
else:
for i in S_list:
card[i[0]] -= 1
print(*card.values())
<|reserved_special_token_1|>
T = int(input())
for cnt in range(1, T + 1):
S = input()
S_list = []
card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}
print('#' + str(cnt), end=' ')
for i in range(0, len(S), 3):
S_list.append(S[i:i + 3])
if len(set(S_list)) != len(S_list):
print('ERROR')
else:
for i in S_list:
card[i[0]] -= 1
print(*card.values())
|
flexible
|
{
"blob_id": "45750152313fd3670867c61d0173e4cb11a806ba",
"index": 4468,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor cnt in range(1, T + 1):\n S = input()\n S_list = []\n card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}\n print('#' + str(cnt), end=' ')\n for i in range(0, len(S), 3):\n S_list.append(S[i:i + 3])\n if len(set(S_list)) != len(S_list):\n print('ERROR')\n else:\n for i in S_list:\n card[i[0]] -= 1\n print(*card.values())\n",
"step-3": "T = int(input())\nfor cnt in range(1, T + 1):\n S = input()\n S_list = []\n card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}\n print('#' + str(cnt), end=' ')\n for i in range(0, len(S), 3):\n S_list.append(S[i:i + 3])\n if len(set(S_list)) != len(S_list):\n print('ERROR')\n else:\n for i in S_list:\n card[i[0]] -= 1\n print(*card.values())\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def performance():
give_speech()
visualize_dow_jones()
give_art_critiques()
stare_at_people()
try_hipster_social_interaction()
share_feelings_with_everyone()
perform_slapstick_humor()
finish()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def performance():
give_speech()
visualize_dow_jones()
give_art_critiques()
stare_at_people()
try_hipster_social_interaction()
share_feelings_with_everyone()
perform_slapstick_humor()
finish()
if __name__ == '__main__':
performance()
<|reserved_special_token_1|>
from introduction import give_speech
from staring import stare_at_people
from dow_jones import visualize_dow_jones
from art_critic import give_art_critiques
from hipster import try_hipster_social_interaction
from empathy import share_feelings_with_everyone
from slapstick import perform_slapstick_humor
from ending import finish
def performance():
give_speech()
visualize_dow_jones()
give_art_critiques()
stare_at_people()
try_hipster_social_interaction()
share_feelings_with_everyone()
perform_slapstick_humor()
finish()
if __name__ == '__main__':
performance()
|
flexible
|
{
"blob_id": "d218b72d1992a30ad07a1edca1caf04b7b1985f6",
"index": 7834,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef performance():\n give_speech()\n visualize_dow_jones()\n give_art_critiques()\n stare_at_people()\n try_hipster_social_interaction()\n share_feelings_with_everyone()\n perform_slapstick_humor()\n finish()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef performance():\n give_speech()\n visualize_dow_jones()\n give_art_critiques()\n stare_at_people()\n try_hipster_social_interaction()\n share_feelings_with_everyone()\n perform_slapstick_humor()\n finish()\n\n\nif __name__ == '__main__':\n performance()\n",
"step-4": "from introduction import give_speech\nfrom staring import stare_at_people\nfrom dow_jones import visualize_dow_jones\nfrom art_critic import give_art_critiques\nfrom hipster import try_hipster_social_interaction\nfrom empathy import share_feelings_with_everyone\nfrom slapstick import perform_slapstick_humor\nfrom ending import finish\n\n\ndef performance():\n give_speech()\n visualize_dow_jones()\n give_art_critiques()\n stare_at_people()\n try_hipster_social_interaction()\n share_feelings_with_everyone()\n perform_slapstick_humor()\n finish()\n\n\nif __name__ == '__main__':\n performance()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.init()
<|reserved_special_token_0|>
screen.fill(Brack)
pygame.draw.rect(screen, White, [420, 134, 400, 500], 0)
<|reserved_special_token_0|>
screen.blit(text_fmt0, (545, 140))
pygame.display.update()
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)
pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)
pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)
button1 = text_1.render('切 换', 1, Brack)
screen.blit(button1, (611, 444))
button = text_1.render(button_text0, 1, Brack)
screen.blit(button, (506, 444))
B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,
button_text[1], color[1]], [2, 647, 290, button_text[2], color[
2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,
button_text[4], color[4]]]
if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:
response2 = urllib.request.urlopen(
'http://localhost:5000/carrier/status')
html2 = response2.read()
text2 = json.loads(html2)
a = text2['sensors']
b = text2['pos']
C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],
[3, 662, 490, a[3]], [4, 732, 490, a[4]]]
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)
pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (B0[b], 525))
if button_text0 == '手动状态:':
for t in range(5):
if button_text[t] == '结 束':
button_text[t] = '开 始'
color[t] = Green
elif button_text0 == '自动状态:':
if button_text[0] == '结 束':
response0 = urllib.request.urlopen(line[0])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[0] = '开 始'
button_text[1] = '结 束'
elif button_text[1] == '结 束':
response0 = urllib.request.urlopen(line[1])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[1] = '开 始'
button_text[2] = '结 束'
elif button_text[2] == '结 束':
response0 = urllib.request.urlopen(line[2])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[2] = '开 始'
button_text[3] = '结 束'
elif button_text[3] == '结 束':
response0 = urllib.request.urlopen(line[3])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[3] = '开 始'
button_text[4] = '结 束'
elif button_text[4] == '结 束':
response0 = urllib.request.urlopen(line[4])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[4] = '开 始'
for i in B:
Process(i[0], i[1], i[2], i[3], i[4])
for v in C:
Station(v[0], v[1], v[2], v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index == 0:
if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:
if button_text0 == '自动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '手动状态:'
color = [Green, Green, Green, Green, Green]
elif button_text0 == '手动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '自动状态:'
button_text[0] = '结 束'
color = [Gray, Gray, Gray, Gray, Gray]
for i in B:
if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[
1] <= i[2] + 25:
if button_text == ['开 始', '开 始', '开 始',
'开 始', '开 始'
] and button_text0 == '手动状态:':
color[i[0]] = Red
button_text[i[0]] = '结 束'
response1 = urllib.request.urlopen(line
[i[0]])
html1 = response1.read()
text1 = json.loads(html1)
print(text1)
for v in C:
if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[
1] <= v[2] + 28:
response3 = urllib.request.urlopen(line0
[v[0]])
html3 = response3.read()
text3 = json.loads(html3)
pygame.draw.rect(screen, White, [420,
525, 400, 50], 0)
pygame.draw.rect(screen, White, [420,
615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img,
(52, 50))
screen.blit(img, (B0[int(text3)], 525))
C = [[0, 452, 490, CGQ[v[0]][0]], [1,
522, 490, CGQ[v[0]][1]], [2, 592,
490, CGQ[v[0]][2]], [3, 662, 490,
CGQ[v[0]][3]], [4, 732, 490, CGQ[v[
0]][4]]]
for f in C:
Station(f[0], f[1], f[2], f[3])
pygame.display.update()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.init()
Brack = [0, 0, 0]
White = [255, 255, 255]
Green = [0, 255, 0]
Red = [255, 0, 0]
Gray = [169, 169, 169]
button_text = ['开 始', '开 始', '开 始', '开 始', '开 始']
line = ['http://localhost:5050/mixer/000',
'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',
'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']
line0 = ['http://localhost:5000/carrier/moveto/0',
'http://localhost:5000/carrier/moveto/1',
'http://localhost:5000/carrier/moveto/2',
'http://localhost:5000/carrier/moveto/3',
'http://localhost:5000/carrier/moveto/4']
CGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],
[1, 1, 1, 1, 0]]
color = [Green, Green, Green, Green, Green]
button_text0 = '手动状态:'
button_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']
Num = ['0', '1', '2', '3', '4']
B0 = [452, 522, 592, 662, 732]
screen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)
screen.fill(Brack)
pygame.draw.rect(screen, White, [420, 134, 400, 500], 0)
text = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']
text_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)
text_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)
text_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)
text_fmt0 = text_0.render('操 作 界 面', 2, Brack)
screen.blit(text_fmt0, (545, 140))
pygame.display.update()
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)
pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)
pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)
button1 = text_1.render('切 换', 1, Brack)
screen.blit(button1, (611, 444))
button = text_1.render(button_text0, 1, Brack)
screen.blit(button, (506, 444))
B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,
button_text[1], color[1]], [2, 647, 290, button_text[2], color[
2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,
button_text[4], color[4]]]
if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:
response2 = urllib.request.urlopen(
'http://localhost:5000/carrier/status')
html2 = response2.read()
text2 = json.loads(html2)
a = text2['sensors']
b = text2['pos']
C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],
[3, 662, 490, a[3]], [4, 732, 490, a[4]]]
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)
pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (B0[b], 525))
if button_text0 == '手动状态:':
for t in range(5):
if button_text[t] == '结 束':
button_text[t] = '开 始'
color[t] = Green
elif button_text0 == '自动状态:':
if button_text[0] == '结 束':
response0 = urllib.request.urlopen(line[0])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[0] = '开 始'
button_text[1] = '结 束'
elif button_text[1] == '结 束':
response0 = urllib.request.urlopen(line[1])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[1] = '开 始'
button_text[2] = '结 束'
elif button_text[2] == '结 束':
response0 = urllib.request.urlopen(line[2])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[2] = '开 始'
button_text[3] = '结 束'
elif button_text[3] == '结 束':
response0 = urllib.request.urlopen(line[3])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[3] = '开 始'
button_text[4] = '结 束'
elif button_text[4] == '结 束':
response0 = urllib.request.urlopen(line[4])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[4] = '开 始'
for i in B:
Process(i[0], i[1], i[2], i[3], i[4])
for v in C:
Station(v[0], v[1], v[2], v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index == 0:
if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:
if button_text0 == '自动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '手动状态:'
color = [Green, Green, Green, Green, Green]
elif button_text0 == '手动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '自动状态:'
button_text[0] = '结 束'
color = [Gray, Gray, Gray, Gray, Gray]
for i in B:
if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[
1] <= i[2] + 25:
if button_text == ['开 始', '开 始', '开 始',
'开 始', '开 始'
] and button_text0 == '手动状态:':
color[i[0]] = Red
button_text[i[0]] = '结 束'
response1 = urllib.request.urlopen(line
[i[0]])
html1 = response1.read()
text1 = json.loads(html1)
print(text1)
for v in C:
if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[
1] <= v[2] + 28:
response3 = urllib.request.urlopen(line0
[v[0]])
html3 = response3.read()
text3 = json.loads(html3)
pygame.draw.rect(screen, White, [420,
525, 400, 50], 0)
pygame.draw.rect(screen, White, [420,
615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img,
(52, 50))
screen.blit(img, (B0[int(text3)], 525))
C = [[0, 452, 490, CGQ[v[0]][0]], [1,
522, 490, CGQ[v[0]][1]], [2, 592,
490, CGQ[v[0]][2]], [3, 662, 490,
CGQ[v[0]][3]], [4, 732, 490, CGQ[v[
0]][4]]]
for f in C:
Station(f[0], f[1], f[2], f[3])
pygame.display.update()
<|reserved_special_token_1|>
import time
import json
import pygame
from pygame.locals import *
import urllib.request
from pygame.color import THECOLORS
pygame.init()
Brack = [0, 0, 0]
White = [255, 255, 255]
Green = [0, 255, 0]
Red = [255, 0, 0]
Gray = [169, 169, 169]
button_text = ['开 始', '开 始', '开 始', '开 始', '开 始']
line = ['http://localhost:5050/mixer/000',
'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',
'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']
line0 = ['http://localhost:5000/carrier/moveto/0',
'http://localhost:5000/carrier/moveto/1',
'http://localhost:5000/carrier/moveto/2',
'http://localhost:5000/carrier/moveto/3',
'http://localhost:5000/carrier/moveto/4']
CGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],
[1, 1, 1, 1, 0]]
color = [Green, Green, Green, Green, Green]
button_text0 = '手动状态:'
button_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']
Num = ['0', '1', '2', '3', '4']
B0 = [452, 522, 592, 662, 732]
screen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)
screen.fill(Brack)
pygame.draw.rect(screen, White, [420, 134, 400, 500], 0)
text = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']
text_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)
text_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)
text_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)
text_fmt0 = text_0.render('操 作 界 面', 2, Brack)
screen.blit(text_fmt0, (545, 140))
pygame.display.update()
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)
pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)
pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)
button1 = text_1.render('切 换', 1, Brack)
screen.blit(button1, (611, 444))
button = text_1.render(button_text0, 1, Brack)
screen.blit(button, (506, 444))
B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,
button_text[1], color[1]], [2, 647, 290, button_text[2], color[
2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,
button_text[4], color[4]]]
if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:
response2 = urllib.request.urlopen(
'http://localhost:5000/carrier/status')
html2 = response2.read()
text2 = json.loads(html2)
a = text2['sensors']
b = text2['pos']
C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],
[3, 662, 490, a[3]], [4, 732, 490, a[4]]]
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)
pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (B0[b], 525))
if button_text0 == '手动状态:':
for t in range(5):
if button_text[t] == '结 束':
button_text[t] = '开 始'
color[t] = Green
elif button_text0 == '自动状态:':
if button_text[0] == '结 束':
response0 = urllib.request.urlopen(line[0])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[0] = '开 始'
button_text[1] = '结 束'
elif button_text[1] == '结 束':
response0 = urllib.request.urlopen(line[1])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[1] = '开 始'
button_text[2] = '结 束'
elif button_text[2] == '结 束':
response0 = urllib.request.urlopen(line[2])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[2] = '开 始'
button_text[3] = '结 束'
elif button_text[3] == '结 束':
response0 = urllib.request.urlopen(line[3])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[3] = '开 始'
button_text[4] = '结 束'
elif button_text[4] == '结 束':
response0 = urllib.request.urlopen(line[4])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[4] = '开 始'
for i in B:
Process(i[0], i[1], i[2], i[3], i[4])
for v in C:
Station(v[0], v[1], v[2], v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index == 0:
if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:
if button_text0 == '自动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '手动状态:'
color = [Green, Green, Green, Green, Green]
elif button_text0 == '手动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '自动状态:'
button_text[0] = '结 束'
color = [Gray, Gray, Gray, Gray, Gray]
for i in B:
if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[
1] <= i[2] + 25:
if button_text == ['开 始', '开 始', '开 始',
'开 始', '开 始'
] and button_text0 == '手动状态:':
color[i[0]] = Red
button_text[i[0]] = '结 束'
response1 = urllib.request.urlopen(line
[i[0]])
html1 = response1.read()
text1 = json.loads(html1)
print(text1)
for v in C:
if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[
1] <= v[2] + 28:
response3 = urllib.request.urlopen(line0
[v[0]])
html3 = response3.read()
text3 = json.loads(html3)
pygame.draw.rect(screen, White, [420,
525, 400, 50], 0)
pygame.draw.rect(screen, White, [420,
615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img,
(52, 50))
screen.blit(img, (B0[int(text3)], 525))
C = [[0, 452, 490, CGQ[v[0]][0]], [1,
522, 490, CGQ[v[0]][1]], [2, 592,
490, CGQ[v[0]][2]], [3, 662, 490,
CGQ[v[0]][3]], [4, 732, 490, CGQ[v[
0]][4]]]
for f in C:
Station(f[0], f[1], f[2], f[3])
pygame.display.update()
<|reserved_special_token_1|>
import time
import json
import pygame
from pygame.locals import *
import urllib.request
from pygame.color import THECOLORS
pygame.init()
Brack=[0,0,0]
White=[255,255,255]
Green=[0,255,0]
Red=[255,0,0]
Gray=[169,169,169]
button_text=["开 始","开 始","开 始","开 始","开 始"]
line=['http://localhost:5050/mixer/000','http://localhost:5050/mixer/100','http://localhost:5050/mixer/200','http://localhost:5050/mixer/300','http://localhost:5050/mixer/400']
line0=['http://localhost:5000/carrier/moveto/0','http://localhost:5000/carrier/moveto/1','http://localhost:5000/carrier/moveto/2','http://localhost:5000/carrier/moveto/3','http://localhost:5000/carrier/moveto/4']
CGQ=[[0,1,1,1,1],[1,0,1,1,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,0]]
color=[Green,Green,Green,Green,Green]
button_text0="手动状态:"
button_text1=["工位0","工位1","工位2","工位3","工位4"]
Num=['0','1','2','3','4']
B0=[452,522,592,662,732]
screen = pygame.display.set_mode((1240,768),FULLSCREEN,32)
screen.fill(Brack)
pygame.draw.rect(screen,White,[420,134,400,500],0)
text=["工 序 甲:","工 序 乙:","工 序 丙:","工 序 丁:","工 序 戊:"]
text_0=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",22)
text_1=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",18)
text_2=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",15)
text_fmt0=text_0.render("操 作 界 面",2,Brack)
screen.blit(text_fmt0,(545,140))
pygame.display.update()
def Process(num,x,y,button_text,color):
text_fmt1=text_1.render(text[num],1,Brack)
screen.blit(text_fmt1,(x-127,y))
pygame.draw.rect(screen,Brack,[x,y,60,25],2)
pygame.draw.rect(screen,color,[x+2,y+2,57,22],0)
button=text_2.render(button_text,1,Brack)
screen.blit(button,(x+13,y+3))
pygame.display.update()
def Station(num,x,y,a):
pygame.draw.rect(screen,Brack,[x,y,55,28],2)
pygame.draw.rect(screen,Green,[x+2,y+2,52,25],0)
button=text_2.render(button_text1[num],1,Brack)
screen.blit(button,(x+9,y+4))
img=pygame.image.load('cgq.jpg')
img=pygame.transform.smoothscale(img,(52,50))
screen.blit(img,(x,y+80))
button=text_1.render(Num[a],1,Brack)
screen.blit(button,(x+20,610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen,White,[506,440,85,28],0)
pygame.draw.rect(screen,Brack,[597,440,65,28],2)
pygame.draw.rect(screen,Green,[599,442,62,25],0)
button1=text_1.render("切 换",1,Brack)
screen.blit(button1,(611,444))
button=text_1.render(button_text0,1,Brack)
screen.blit(button,(506,444))
B=[[0,647,190,button_text[0],color[0]],[1,647,240,button_text[1],color[1]],[2,647,290,button_text[2],color[2]],[3,647,340,button_text[3],color[3]],[4,647,390,button_text[4],color[4]]]
if button_text==["开 始","开 始","开 始","开 始","开 始"]:
response2=urllib.request.urlopen('http://localhost:5000/carrier/status')
html2=response2.read()
text2=json.loads(html2)
a=text2['sensors']
b=text2['pos']
C=[[0,452,490,a[0]],[1,522,490,a[1]],[2,592,490,a[2]],[3,662,490,a[3]],[4,732,490,a[4]]]
pygame.draw.rect(screen,White,[420,525,400,50],0)
pygame.draw.rect(screen,White,[420,615,400,30],0)
img=pygame.image.load('car.jpg')
img=pygame.transform.smoothscale(img,(52,50))
screen.blit(img,(B0[b],525))
if button_text0=="手动状态:":
for t in range(5):
if button_text[t]=="结 束":
button_text[t]="开 始"
color[t]=Green
elif button_text0=="自动状态:":
if button_text[0]=="结 束":
response0=urllib.request.urlopen(line[0])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[0]="开 始"
button_text[1]="结 束"
elif button_text[1]=="结 束":
response0=urllib.request.urlopen(line[1])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[1]="开 始"
button_text[2]="结 束"
elif button_text[2]=="结 束":
response0=urllib.request.urlopen(line[2])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[2]="开 始"
button_text[3]="结 束"
elif button_text[3]=="结 束":
response0=urllib.request.urlopen(line[3])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[3]="开 始"
button_text[4]="结 束"
elif button_text[4]=="结 束":
response0=urllib.request.urlopen(line[4])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[4]="开 始"
for i in B:
Process(i[0],i[1],i[2],i[3],i[4])
for v in C:
Station(v[0],v[1],v[2],v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index==0:
if 597<=pos[0]<=662 and 440<=pos[1]<=468:
if button_text0=="自动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
button_text0="手动状态:"
color=[Green,Green,Green,Green,Green]
elif button_text0=="手动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
button_text0="自动状态:"
button_text[0]="结 束"
color=[Gray,Gray,Gray,Gray,Gray]
for i in B:
if i[1]<=pos[0]<=i[1]+60 and i[2]<=pos[1]<=i[2]+25:
if button_text==["开 始","开 始","开 始","开 始","开 始"] and button_text0=="手动状态:":
color[i[0]]=Red
button_text[i[0]]="结 束"
response1=urllib.request.urlopen(line[i[0]])
html1=response1.read()
text1=json.loads(html1)
print(text1)
for v in C:
if v[1]<=pos[0]<=v[1]+60 and v[2]<=pos[1]<=v[2]+28:
response3=urllib.request.urlopen(line0[v[0]])
html3=response3.read()
text3=json.loads(html3)
pygame.draw.rect(screen,White,[420,525,400,50],0)
pygame.draw.rect(screen,White,[420,615,400,30],0)
img=pygame.image.load('car.jpg')
img=pygame.transform.smoothscale(img,(52,50))
screen.blit(img,(B0[int(text3)],525))
C=[[0,452,490,CGQ[v[0]][0]],[1,522,490,CGQ[v[0]][1]],[2,592,490,CGQ[v[0]][2]],[3,662,490,CGQ[v[0]][3]],[4,732,490,CGQ[v[0]][4]]]
for f in C:
Station(f[0],f[1],f[2],f[3])
pygame.display.update()
|
flexible
|
{
"blob_id": "609071fc3af1b526fbd4555ced2376f56ae0f3c3",
"index": 2174,
"step-1": "<mask token>\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\n<mask token>\n",
"step-2": "<mask token>\npygame.init()\n<mask token>\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\n<mask token>\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n 
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, 
Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n",
"step-3": "<mask token>\npygame.init()\nBrack = [0, 0, 0]\nWhite = [255, 255, 255]\nGreen = [0, 255, 0]\nRed = [255, 0, 0]\nGray = [169, 169, 169]\nbutton_text = ['开 始', '开 始', '开 始', '开 始', '开 始']\nline = ['http://localhost:5050/mixer/000',\n 'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',\n 'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']\nline0 = ['http://localhost:5000/carrier/moveto/0',\n 'http://localhost:5000/carrier/moveto/1',\n 'http://localhost:5000/carrier/moveto/2',\n 'http://localhost:5000/carrier/moveto/3',\n 'http://localhost:5000/carrier/moveto/4']\nCGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],\n [1, 1, 1, 1, 0]]\ncolor = [Green, Green, Green, Green, Green]\nbutton_text0 = '手动状态:'\nbutton_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']\nNum = ['0', '1', '2', '3', '4']\nB0 = [452, 522, 592, 662, 732]\nscreen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\ntext = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']\ntext_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)\ntext_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)\ntext_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)\ntext_fmt0 = text_0.render('操 作 界 面', 2, Brack)\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, 
Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 
始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, 
White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n",
"step-4": "import time\nimport json\nimport pygame\nfrom pygame.locals import *\nimport urllib.request\nfrom pygame.color import THECOLORS\npygame.init()\nBrack = [0, 0, 0]\nWhite = [255, 255, 255]\nGreen = [0, 255, 0]\nRed = [255, 0, 0]\nGray = [169, 169, 169]\nbutton_text = ['开 始', '开 始', '开 始', '开 始', '开 始']\nline = ['http://localhost:5050/mixer/000',\n 'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',\n 'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']\nline0 = ['http://localhost:5000/carrier/moveto/0',\n 'http://localhost:5000/carrier/moveto/1',\n 'http://localhost:5000/carrier/moveto/2',\n 'http://localhost:5000/carrier/moveto/3',\n 'http://localhost:5000/carrier/moveto/4']\nCGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],\n [1, 1, 1, 1, 0]]\ncolor = [Green, Green, Green, Green, Green]\nbutton_text0 = '手动状态:'\nbutton_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']\nNum = ['0', '1', '2', '3', '4']\nB0 = [452, 522, 592, 662, 732]\nscreen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\ntext = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']\ntext_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)\ntext_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)\ntext_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)\ntext_fmt0 = text_0.render('操 作 界 面', 2, Brack)\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 
28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = 
urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = 
urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n",
"step-5": "import time\nimport json\nimport pygame\nfrom pygame.locals import *\nimport urllib.request\nfrom pygame.color import THECOLORS\npygame.init()\nBrack=[0,0,0]\nWhite=[255,255,255]\nGreen=[0,255,0]\nRed=[255,0,0]\nGray=[169,169,169]\nbutton_text=[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]\nline=['http://localhost:5050/mixer/000','http://localhost:5050/mixer/100','http://localhost:5050/mixer/200','http://localhost:5050/mixer/300','http://localhost:5050/mixer/400']\nline0=['http://localhost:5000/carrier/moveto/0','http://localhost:5000/carrier/moveto/1','http://localhost:5000/carrier/moveto/2','http://localhost:5000/carrier/moveto/3','http://localhost:5000/carrier/moveto/4']\nCGQ=[[0,1,1,1,1],[1,0,1,1,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,0]]\ncolor=[Green,Green,Green,Green,Green]\nbutton_text0=\"手动状态:\"\nbutton_text1=[\"工位0\",\"工位1\",\"工位2\",\"工位3\",\"工位4\"]\nNum=['0','1','2','3','4']\nB0=[452,522,592,662,732]\nscreen = pygame.display.set_mode((1240,768),FULLSCREEN,32)\nscreen.fill(Brack)\npygame.draw.rect(screen,White,[420,134,400,500],0)\ntext=[\"工 序 甲:\",\"工 序 乙:\",\"工 序 丙:\",\"工 序 丁:\",\"工 序 戊:\"]\ntext_0=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",22)\ntext_1=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",18)\ntext_2=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",15)\ntext_fmt0=text_0.render(\"操 作 界 面\",2,Brack)\nscreen.blit(text_fmt0,(545,140))\npygame.display.update()\ndef Process(num,x,y,button_text,color):\n text_fmt1=text_1.render(text[num],1,Brack)\n screen.blit(text_fmt1,(x-127,y))\n pygame.draw.rect(screen,Brack,[x,y,60,25],2)\n pygame.draw.rect(screen,color,[x+2,y+2,57,22],0)\n button=text_2.render(button_text,1,Brack)\n screen.blit(button,(x+13,y+3))\n pygame.display.update()\ndef Station(num,x,y,a):\n pygame.draw.rect(screen,Brack,[x,y,55,28],2)\n pygame.draw.rect(screen,Green,[x+2,y+2,52,25],0)\n button=text_2.render(button_text1[num],1,Brack)\n screen.blit(button,(x+9,y+4))\n 
img=pygame.image.load('cgq.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(x,y+80))\n button=text_1.render(Num[a],1,Brack)\n screen.blit(button,(x+20,610))\n pygame.display.update()\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen,White,[506,440,85,28],0)\n pygame.draw.rect(screen,Brack,[597,440,65,28],2)\n pygame.draw.rect(screen,Green,[599,442,62,25],0)\n button1=text_1.render(\"切 换\",1,Brack)\n screen.blit(button1,(611,444))\n button=text_1.render(button_text0,1,Brack)\n screen.blit(button,(506,444))\n B=[[0,647,190,button_text[0],color[0]],[1,647,240,button_text[1],color[1]],[2,647,290,button_text[2],color[2]],[3,647,340,button_text[3],color[3]],[4,647,390,button_text[4],color[4]]]\n if button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n response2=urllib.request.urlopen('http://localhost:5000/carrier/status')\n html2=response2.read()\n text2=json.loads(html2)\n a=text2['sensors']\n b=text2['pos']\n C=[[0,452,490,a[0]],[1,522,490,a[1]],[2,592,490,a[2]],[3,662,490,a[3]],[4,732,490,a[4]]]\n pygame.draw.rect(screen,White,[420,525,400,50],0)\n pygame.draw.rect(screen,White,[420,615,400,30],0)\n img=pygame.image.load('car.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(B0[b],525))\n if button_text0==\"手动状态:\":\n for t in range(5):\n if button_text[t]==\"结 束\":\n button_text[t]=\"开 始\"\n color[t]=Green\n elif button_text0==\"自动状态:\":\n if button_text[0]==\"结 束\":\n response0=urllib.request.urlopen(line[0])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[0]=\"开 始\"\n button_text[1]=\"结 束\"\n elif button_text[1]==\"结 束\":\n response0=urllib.request.urlopen(line[1])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[1]=\"开 始\"\n button_text[2]=\"结 束\"\n elif button_text[2]==\"结 束\":\n response0=urllib.request.urlopen(line[2])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[2]=\"开 始\"\n 
button_text[3]=\"结 束\"\n elif button_text[3]==\"结 束\":\n response0=urllib.request.urlopen(line[3])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[3]=\"开 始\"\n button_text[4]=\"结 束\"\n elif button_text[4]==\"结 束\":\n response0=urllib.request.urlopen(line[4])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[4]=\"开 始\"\n for i in B:\n Process(i[0],i[1],i[2],i[3],i[4])\n for v in C:\n Station(v[0],v[1],v[2],v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index==0:\n if 597<=pos[0]<=662 and 440<=pos[1]<=468:\n if button_text0==\"自动状态:\" and button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n button_text0=\"手动状态:\"\n color=[Green,Green,Green,Green,Green]\n elif button_text0==\"手动状态:\" and button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n button_text0=\"自动状态:\"\n button_text[0]=\"结 束\"\n color=[Gray,Gray,Gray,Gray,Gray]\n for i in B:\n if i[1]<=pos[0]<=i[1]+60 and i[2]<=pos[1]<=i[2]+25:\n if button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"] and button_text0==\"手动状态:\":\n color[i[0]]=Red\n button_text[i[0]]=\"结 束\"\n response1=urllib.request.urlopen(line[i[0]])\n html1=response1.read()\n text1=json.loads(html1)\n print(text1)\n for v in C:\n if v[1]<=pos[0]<=v[1]+60 and v[2]<=pos[1]<=v[2]+28:\n response3=urllib.request.urlopen(line0[v[0]])\n html3=response3.read()\n text3=json.loads(html3)\n pygame.draw.rect(screen,White,[420,525,400,50],0)\n pygame.draw.rect(screen,White,[420,615,400,30],0)\n img=pygame.image.load('car.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(B0[int(text3)],525))\n 
C=[[0,452,490,CGQ[v[0]][0]],[1,522,490,CGQ[v[0]][1]],[2,592,490,CGQ[v[0]][2]],[3,662,490,CGQ[v[0]][3]],[4,732,490,CGQ[v[0]][4]]]\n for f in C:\n Station(f[0],f[1],f[2],f[3])\n pygame.display.update()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class DevelopmentConfig(Config):
DEBUG = True
CORS_ALLOWED_ORIGINS = 'developmentexample.com'
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProductionConfig(Config):
<|reserved_special_token_0|>
class DevelopmentConfig(Config):
DEBUG = True
CORS_ALLOWED_ORIGINS = 'developmentexample.com'
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
class Config(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ProductionConfig(Config):
CORS_ALLOWED_ORIGINS = 'productionexample.com'
class DevelopmentConfig(Config):
DEBUG = True
CORS_ALLOWED_ORIGINS = 'developmentexample.com'
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
class Config(object):
DEBUG = False
TESTING = False
class ProductionConfig(Config):
CORS_ALLOWED_ORIGINS = 'productionexample.com'
class DevelopmentConfig(Config):
DEBUG = True
CORS_ALLOWED_ORIGINS = 'developmentexample.com'
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
class Config(object):
DEBUG = False
TESTING = False
class ProductionConfig(Config):
CORS_ALLOWED_ORIGINS = "productionexample.com"
class DevelopmentConfig(Config):
DEBUG = True
CORS_ALLOWED_ORIGINS = "developmentexample.com"
class TestingConfig(Config):
TESTING = True
|
flexible
|
{
"blob_id": "b76c868a29b5edd07d0da60b1a13ddb4ac3e2913",
"index": 6988,
"step-1": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-2": "<mask token>\n\n\nclass ProductionConfig(Config):\n <mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-3": "class Config(object):\n <mask token>\n <mask token>\n\n\nclass ProductionConfig(Config):\n CORS_ALLOWED_ORIGINS = 'productionexample.com'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-4": "class Config(object):\n DEBUG = False\n TESTING = False\n\n\nclass ProductionConfig(Config):\n CORS_ALLOWED_ORIGINS = 'productionexample.com'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = 'developmentexample.com'\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-5": "class Config(object):\n DEBUG = False\n TESTING = False\n\n\nclass ProductionConfig(Config):\n CORS_ALLOWED_ORIGINS = \"productionexample.com\"\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n CORS_ALLOWED_ORIGINS = \"developmentexample.com\"\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 Linh Pham
# wwdtm_panelistvspanelist is relased under the terms of the Apache License 2.0
"""WWDTM Panelist Appearance Report Generator"""
import argparse
from collections import OrderedDict
from datetime import datetime
import json
import os
import shutil
from typing import List, Dict, Text
import mysql.connector
import pytz
from jinja2 import Environment, FileSystemLoader
def retrieve_panelist_appearance_counts(panelist_id: int,
                                        database_connection: mysql.connector.connect
                                       ) -> Dict:
    """Retrieve per-year appearance counts for the requested panelist ID.

    Only regular shows are counted (Best Of and repeat shows excluded).

    :param panelist_id: database ID of the panelist
    :param database_connection: open MySQL database connection
    :return: OrderedDict mapping show year -> appearance count, with an
        extra "total" key holding the sum across all years, or None when
        the panelist has no qualifying appearances
    """
    cursor = database_connection.cursor()
    query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count "
             "FROM ww_showpnlmap pm "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "WHERE pm.panelistid = %s AND s.bestof = 0 "
             "AND s.repeatshowid IS NULL "
             "GROUP BY p.panelist, YEAR(s.showdate) "
             "ORDER BY p.panelist ASC, YEAR(s.showdate) ASC")
    cursor.execute(query, (panelist_id, ))
    result = cursor.fetchall()
    # Fix: close the cursor (it was previously leaked on every call;
    # retrieve_all_years already closes its cursor)
    cursor.close()

    if not result:
        return None

    appearances = OrderedDict()
    total_appearances = 0
    for year, count in result:
        appearances[year] = count
        total_appearances += count
    appearances["total"] = total_appearances
    return appearances
def retrieve_all_panelist_appearance_counts(database_connection: mysql.connector.connect
                                           ) -> List[Dict]:
    """Retrieve appearance counts for every panelist in the database.

    Only regular shows are counted (Best Of and repeat shows excluded).

    :param database_connection: open MySQL database connection
    :return: list of dicts with keys "name" and "appearances" (the latter
        produced by retrieve_panelist_appearance_counts), ordered by
        panelist name, or None when no panelists are found
    """
    cursor = database_connection.cursor()
    query = ("SELECT DISTINCT p.panelistid, p.panelist "
             "FROM ww_showpnlmap pm "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "WHERE s.bestof = 0 AND s.repeatshowid IS NULL "
             "ORDER BY p.panelist ASC")
    cursor.execute(query)
    result = cursor.fetchall()
    # Fix: close the cursor (it was previously leaked on every call;
    # retrieve_all_years already closes its cursor)
    cursor.close()

    if not result:
        return None

    panelists = []
    for panelist_id, name in result:
        panelist = {}
        panelist["name"] = name
        # One follow-up query per panelist for the yearly breakdown
        panelist["appearances"] = retrieve_panelist_appearance_counts(
            panelist_id=panelist_id,
            database_connection=database_connection)
        panelists.append(panelist)

    return panelists
def retrieve_all_years(database_connection: mysql.connector.connect) -> List[int]:
    """Return every distinct show year, in ascending order.

    :param database_connection: open MySQL database connection
    :return: list of years, or None when the shows table is empty
    """
    cursor = database_connection.cursor()
    cursor.execute("SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s "
                   "ORDER BY YEAR(s.showdate) ASC")
    result = cursor.fetchall()
    cursor.close()

    if not result:
        return None

    return [row[0] for row in result]
def load_config():
    """Load configuration from config.json, letting command-line options
    override values in the "report" section.

    :return: merged configuration dictionary (the parsed config.json with
        any command-line overrides applied to config_dict["report"])
    """
    # Read in configuration file for default values; explicit encoding so
    # the result does not depend on the platform's locale
    with open("config.json", "r", encoding="utf-8") as config_file:
        config_dict = json.load(config_file)

    report_config = config_dict["report"]

    # Each option's default comes from config.json, so parse_args() yields
    # the config value unless the user overrides it on the command line
    parser = argparse.ArgumentParser()
    parser.add_argument("--ga-property-code",
                        dest="ga_property_code",
                        type=str,
                        help="Google Analytics Property Code (default: %(default)s)",
                        default=report_config["ga_property_code"])
    parser.add_argument("--css-directory",
                        dest="css_directory",
                        type=str,
                        help="Directory where the base CSS stylesheet file is stored "
                             "(default: %(default)s)",
                        default=report_config["css_directory"])
    parser.add_argument("--css-filename",
                        dest="css_filename",
                        type=str,
                        help="File name of the report CSS stylesheet file "
                             "(default: %(default)s)",
                        default=report_config["css_filename"])
    parser.add_argument("--output-directory",
                        dest="output_directory",
                        type=str,
                        help="Directory where the generated report will be saved "
                             "(default: %(default)s)",
                        default=report_config["output_directory"])
    parser.add_argument("--output-filename",
                        dest="output_filename",
                        type=str,
                        help="File name of the generated report will be saved "
                             "(default: %(default)s)",
                        default=report_config["output_filename"])
    args = parser.parse_args()

    # Because the argparse defaults are sourced from config.json above,
    # unconditional assignment keeps unchanged values and applies overrides
    # in one step (the original per-option != checks were redundant)
    for option in ("ga_property_code", "css_directory", "css_filename",
                   "output_directory", "output_filename"):
        report_config[option] = getattr(args, option)

    return config_dict
def render_report(show_years: List[int],
                  panelists: List[Dict],
                  report_settings: Dict
                 ) -> Text:
    """Render the panelist appearances report to HTML via Jinja2.

    :param show_years: list of show years to use as table columns
    :param panelists: panelist appearance data from
        retrieve_all_panelist_appearance_counts
    :param report_settings: the "report" section of the app configuration
    :return: rendered HTML report as a string
    """
    # Load the report template from the local template directory
    template_env = Environment(loader=FileSystemLoader("./template"),
                               trim_blocks=True,
                               lstrip_blocks=True)
    template = template_env.get_template("report.tmpl.html")

    # Timestamp (Pacific time) shown in the page footer
    rendered_date_time = datetime.now(pytz.timezone("America/Los_Angeles"))

    # Data passed into the template renderer
    render_data = {
        "show_years": show_years,
        "panelists": panelists,
        "settings": report_settings,
        "rendered_at": rendered_date_time.strftime("%A, %B %d, %Y %H:%M:%S %Z"),
    }

    return template.render(render_data=render_data)
def generate_output_files(rendered_report: Text,
                          report_settings: Dict) -> None:
    """Write the generated report into the output directory and copy the
    base CSS stylesheet alongside it.

    :param rendered_report: rendered HTML report text
    :param report_settings: dict with keys "css_directory", "css_filename",
        "output_directory" and "output_filename"
    :raises OSError: if the report or CSS file cannot be written/copied
    """
    css_path = os.path.join(report_settings["css_directory"],
                            report_settings["css_filename"])
    output_path = os.path.join(report_settings["output_directory"],
                               report_settings["output_filename"])

    # Fix: makedirs with exist_ok creates any missing parent directories
    # and avoids the check-then-create race that isdir()+mkdir() had
    os.makedirs(report_settings["output_directory"], exist_ok=True)

    # Write out the generated report; a file opened with mode "w" is always
    # writable, so the previous writable() check was dead code — any real
    # failure surfaces as an OSError from open()/write()
    with open(output_path, "w", encoding="utf-8") as output_file:
        output_file.write(rendered_report)

    # Copy CSS file into output directory (preserving metadata)
    shutil.copy2(css_path, report_settings["output_directory"])
    return
def main():
    """Bootstrap database connection, retrieve panelist appearance data,
    generate the report and create an output bundle"""
    app_config = load_config()
    connection = mysql.connector.connect(**app_config["database"])

    # Pull the raw data the report needs from the database
    panelists = retrieve_all_panelist_appearance_counts(connection)
    show_years = retrieve_all_years(connection)

    # Render the HTML and write the report bundle to disk
    report_settings = app_config["report"]
    html = render_report(show_years=show_years,
                         panelists=panelists,
                         report_settings=report_settings)
    generate_output_files(rendered_report=html,
                          report_settings=report_settings)
# Only run if executed as a script and not imported
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "2d20bac0f11fa724b2d0a2e0676e5b9ce7682777",
"index": 7387,
"step-1": "<mask token>\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != 
config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not 
writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.\n connector.connect) ->List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT p.panelistid, p.panelist FROM ww_showpnlmap pm JOIN ww_panelists p ON p.panelistid = pm.panelistid JOIN ww_shows s ON s.showid = pm.showid WHERE s.bestof = 0 AND s.repeatshowid IS NULL ORDER BY p.panelist ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n return None\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist['name'] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=\n panelist_id, database_connection=database_connection)\n panelist['appearances'] = appearances\n panelists.append(panelist)\n return panelists\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , 
default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n 
render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect) ->List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count FROM ww_showpnlmap pm JOIN ww_shows s ON s.showid = pm.showid JOIN ww_panelists p ON p.panelistid = pm.panelistid WHERE pm.panelistid = %s AND s.bestof = 0 AND s.repeatshowid IS NULL GROUP BY p.panelist, YEAR(s.showdate) ORDER BY p.panelist ASC, YEAR(s.showdate) ASC'\n )\n cursor.execute(query, (panelist_id,))\n result = cursor.fetchall()\n if not result:\n return None\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n appearances['total'] = total_appearances\n return appearances\n\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.\n connector.connect) ->List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT p.panelistid, p.panelist FROM ww_showpnlmap pm JOIN ww_panelists p ON p.panelistid = pm.panelistid JOIN ww_shows s ON s.showid = pm.showid WHERE s.bestof = 0 AND s.repeatshowid IS NULL ORDER BY p.panelist ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n return None\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist['name'] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=\n panelist_id, database_connection=database_connection)\n panelist['appearances'] = appearances\n panelists.append(panelist)\n return panelists\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT 
DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if 
args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output 
bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect) ->List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count FROM ww_showpnlmap pm JOIN ww_shows s ON s.showid = pm.showid JOIN ww_panelists p ON p.panelistid = pm.panelistid WHERE pm.panelistid = %s AND s.bestof = 0 AND s.repeatshowid IS NULL GROUP BY p.panelist, YEAR(s.showdate) ORDER BY p.panelist ASC, YEAR(s.showdate) ASC'\n )\n cursor.execute(query, (panelist_id,))\n result = cursor.fetchall()\n if not result:\n return None\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n appearances['total'] = total_appearances\n return appearances\n\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.\n connector.connect) ->List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT p.panelistid, p.panelist FROM ww_showpnlmap pm JOIN ww_panelists p ON p.panelistid = pm.panelistid JOIN ww_shows s ON s.showid = pm.showid WHERE s.bestof = 0 AND s.repeatshowid IS NULL ORDER BY p.panelist ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n return None\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist['name'] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=\n panelist_id, database_connection=database_connection)\n panelist['appearances'] = appearances\n panelists.append(panelist)\n return panelists\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT 
DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if 
args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output 
bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2019 Linh Pham\n# wwdtm_panelistvspanelist is relased under the terms of the Apache License 2.0\n\"\"\"WWDTM Panelist Appearance Report Generator\"\"\"\n\nimport argparse\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport json\nimport os\nimport shutil\nfrom typing import List, Dict, Text\nimport mysql.connector\nimport pytz\n\nfrom jinja2 import Environment, FileSystemLoader\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect\n ) -> List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n\n cursor = database_connection.cursor()\n query = (\"SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"JOIN ww_panelists p ON p.panelistid = pm.panelistid \"\n \"WHERE pm.panelistid = %s AND s.bestof = 0 \"\n \"AND s.repeatshowid IS NULL \"\n \"GROUP BY p.panelist, YEAR(s.showdate) \"\n \"ORDER BY p.panelist ASC, YEAR(s.showdate) ASC\")\n cursor.execute(query, (panelist_id, ))\n result = cursor.fetchall()\n\n if not result:\n return None\n\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n\n appearances[\"total\"] = total_appearances\n return appearances\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.connector.connect\n ) -> List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n\n cursor = database_connection.cursor()\n query = (\"SELECT DISTINCT p.panelistid, p.panelist \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_panelists p ON p.panelistid = pm.panelistid \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"ORDER BY p.panelist ASC\")\n cursor.execute(query)\n result = cursor.fetchall()\n\n if not result:\n return 
None\n\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist[\"name\"] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=panelist_id,\n database_connection=database_connection)\n panelist[\"appearances\"] = appearances\n panelists.append(panelist)\n\n return panelists\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) -> List[int]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\"SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s \"\n \"ORDER BY YEAR(s.showdate) ASC\")\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n\n if not result:\n return None\n\n years = []\n for row in result:\n years.append(row[0])\n\n return years\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n\n # Read in configuration file for default values\n with open(\"config.json\", \"r\") as config_file:\n config_dict = json.load(config_file)\n\n # Read in options passed in that override values from the config.json file\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ga-property-code\",\n dest=\"ga_property_code\",\n type=str,\n help=\"Google Analytics Property Code (default: %(default)s)\",\n default=config_dict[\"report\"][\"ga_property_code\"])\n parser.add_argument(\"--css-directory\",\n dest=\"css_directory\",\n type=str,\n help=\"Directory where the base CSS stylesheet file is stored \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"css_directory\"])\n parser.add_argument(\"--css-filename\",\n dest=\"css_filename\",\n type=str,\n help=\"File name of the report CSS stylesheet file \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"css_filename\"])\n parser.add_argument(\"--output-directory\",\n dest=\"output_directory\",\n type=str,\n help=\"Directory where the generated report will be saved \"\n 
\"(default: %(default)s)\",\n default=config_dict[\"report\"][\"output_directory\"])\n parser.add_argument(\"--output-filename\",\n dest=\"output_filename\",\n type=str,\n help=\"File name of the generated report will be saved \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"output_filename\"])\n args = parser.parse_args()\n\n # Override the values from the config.json file if values were set via argparse\n if args.ga_property_code != config_dict[\"report\"][\"ga_property_code\"]:\n config_dict[\"report\"][\"ga_property_code\"] = args.ga_property_code\n\n if args.css_directory != config_dict[\"report\"][\"css_directory\"]:\n config_dict[\"report\"][\"css_directory\"] = args.css_directory\n\n if args.css_filename != config_dict[\"report\"][\"css_filename\"]:\n config_dict[\"report\"][\"css_filename\"] = args.css_filename\n\n if args.output_directory != config_dict[\"report\"][\"output_directory\"]:\n config_dict[\"report\"][\"output_directory\"] = args.output_directory\n\n if args.output_filename != config_dict[\"report\"][\"output_filename\"]:\n config_dict[\"report\"][\"output_filename\"] = args.output_filename\n\n return config_dict\n\ndef render_report(show_years: List[int],\n panelists: List[Dict],\n report_settings: Dict\n ) -> Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n\n # Setup Jinja2 Template\n template_loader = FileSystemLoader(\"./template\")\n template_env = Environment(loader=template_loader,\n trim_blocks=True,\n lstrip_blocks=True)\n template_file = \"report.tmpl.html\"\n template = template_env.get_template(template_file)\n\n # Generate timestamp to include in page footer\n time_zone = pytz.timezone(\"America/Los_Angeles\")\n rendered_date_time = datetime.now(time_zone)\n\n # Build dictionary to pass into template renderer\n render_data = {}\n render_data[\"show_years\"] = show_years\n render_data[\"panelists\"] = panelists\n render_data[\"settings\"] = report_settings\n render_data[\"rendered_at\"] = 
rendered_date_time.strftime(\"%A, %B %d, %Y %H:%M:%S %Z\")\n\n # Render the report and write out to output directory\n report = template.render(render_data=render_data)\n return report\n\ndef generate_output_files(rendered_report: Text,\n report_settings: Dict) -> None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n\n css_path = os.path.join(report_settings[\"css_directory\"],\n report_settings[\"css_filename\"])\n output_path = os.path.join(report_settings[\"output_directory\"],\n report_settings[\"output_filename\"])\n\n # Create the output directory if it does not exist\n if not os.path.isdir(report_settings[\"output_directory\"]):\n os.mkdir(report_settings[\"output_directory\"])\n\n # Write out the generated report\n with open(output_path, \"w\") as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print(\"Error: {} is not writable\".format(output_path))\n\n # Copy CSS file into output directory\n shutil.copy2(css_path, report_settings[\"output_directory\"])\n\n return\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])\n\n# Only run if executed as a script and not imported\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
import os, sys, string
import linecache, math
import numpy as np
import datetime , time
from pople import NFC
from pople import uniqatoms
from pople import orca_printbas
####### orca_run - S
def orca_run(method, basis,optfreq,custombasis, correlated, values, charge, multip, sym, R_coord):
    """
    Builds an ORCA input file ("input.com") and runs ORCA on it.

    After the run, input.com and input.out are appended to the cumulative
    ORCA.inp / ORCA.out logs in the working directory.

    Parameters:
        method (char) : Name of functional to be used
        basis (char) : Basis set name (with custombasis: name of the basis file)
        optfreq (char) : true/false value of the optfreq keyword
        custombasis (char) : true/false value of the custombasis keyword
        correlated (char) : true/false value of the correlated keyword
        values (dict): Values of the control variables (stored as strings)
        charge : Molecular charge
        multip : Spin multiplicity
        sym (list) : Atomic symbols, one per atom
        R_coord (list) : Cartesian coordinates, one [x, y, z] row per atom
    """
    with open("input.com", "w") as com_f:
        # ---- "!" keyword line -------------------------------------------
        if optfreq == "true":
            # FIX: Freqstr is assigned before the verticalIP/IPss branch.
            # Previously it was set only inside the first branch, so the
            # else-path below raised NameError on an undefined Freqstr.
            if values["MGGA"] == "true":
                Freqstr="NumFreq"   # MGGA -> numerical frequencies
            else:
                Freqstr="Freq"
            if values["verticalIP"] != "true" or values["IPss"] != "true": # IPss not defined
                if custombasis == "true":
                    com_f.write("! " +str(method) + " " + values["String_Opt"] + " " + Freqstr + " \n")
                else:
                    com_f.write("! " +str(method) + " " + str(basis) +" "+values["String_Opt"] + " " + Freqstr + " \n")
            else:
                if custombasis == "true":
                    com_f.write("! " +str(method) + " " + Freqstr + " \n")
                else:
                    com_f.write("! " +str(method) + " " + str(basis) + " " + Freqstr + " \n")
        else:
            if custombasis == "true":
                com_f.write("! " +str(method) + " \n")
            else:
                com_f.write("! " +str(method) + " " + str(basis) + " \n")
        # ---- geometry block (*xyz ... *) --------------------------------
        Nat=len(sym)
        com_f.write("*xyz "+str(charge)+" "+str(multip) + "\n")
        for tmp in range(Nat):
            R_x=float(R_coord[tmp][0])
            R_y=float(R_coord[tmp][1])
            R_z=float(R_coord[tmp][2])
            com_f.write(' {:2s}{:15.8f}{:15.8f}{:15.8f}\n'.format(sym[tmp],R_x,R_y,R_z))
        com_f.write("*\n")
        # ---- memory and SCF convergence ---------------------------------
        com_f.write("%MaxCore " + values["maxcore_mb"] + "\n")
        com_f.write("%scf\n MaxIter 500 \n")
        com_f.write(" Convergence " + values["conv_scf"] + "\n")
        com_f.write("end\n")
        if values["switch_guess"] == "true": ### this is not part of the inp file!!!
            # NOTE(review): values["G4MP2TM"] is tested for truthiness here,
            # unlike the == "true" string comparison used everywhere else in
            # this function - confirm intended semantics.
            if values["guess_TM"] == "true" and values["G4MP2TM"]:
                com_f.write(" Guess = " + values["option_guess"] + "\n")
            com_f.write("end\n")
        if values["switch_load_rel_file"] == "true":
            # FIX: read rel_file.txt through a context manager so the handle
            # is closed even if the write fails (was open()/close() pair).
            with open("rel_file.txt", "r") as f1:
                com_f.write(f1.read())
            with open("Thermochemistry.out", "a") as ther_chem:
                ther_chem.write("check if rel_file.txt exists!!")
        # ---- optional SCF convergence helpers ---------------------------
        if values["SCFDIIS"] == "true":
            com_f.write("%scf\n DIISMaxEq 15\n")
            com_f.write(" directresetfreq 1\n")
            com_f.write("end\n")
        if values["LSHIFT"] == "true":
            com_f.write("%scf\n")
            com_f.write(" Shift Shift 0.1 ErrOff 0.1 end\n")
            com_f.write("end\n")
        if values["SOSCF"] == "true":
            com_f.write("%scf\n")
            com_f.write(" soscfmaxit 12\n")
            com_f.write(" directresetfreq 1\n")
            com_f.write("end\n")
        if values["switch_DLPNO_CCSDT"] == "true":
            com_f.write("%mdci\n")
            com_f.write(" UseFullLMP2Guess true\n")
            com_f.write(" TcutDOPre = " + str(values["TcutDOPre"]) +"\n") #TODO Is this really needed?
            com_f.write("end\n")
        # Fall back to a single process when the electron counts in `values`
        # are too small relative to the requested process count.
        if ( float(values["Ntotale"]) <= float(values["nproc"]) ) or ( (float(values["Ntotale"])-float(values["Ntotalecore"])) < float(values["nproc"]) ):
            com_f.write("%pal nprocs 1 \n")
        else:
            com_f.write("%pal nprocs "+values["nproc"]+" \n")
        com_f.write("end\n")
        com_f.write("%method\n") ## CHECK
        com_f.write(" IntAcc 7.0\n")
        if values["optdiis"] == "true":
            com_f.write(" Z_solver DIIS\n")
            com_f.write(" Z_MaxIter 300\n")
        if correlated == "true":
            # Per-element frozen-core counts (NewNCore) inside %method.
            uniq_atom_res = uniqatoms(sym)
            if values["ALLELE"] == "true": ### CHECK!!!!
                # All-electron: freeze zero core orbitals for every element.
                for iat in range(int(uniq_atom_res["N_ua"])):
                    pre1 = uniq_atom_res["uniq_sym"]
                    at_pr1 = pre1[iat]
                    com_f.write(" NewNCore " + at_pr1 + " " + " 0 end\n")
            else:
                for iat in range(int(uniq_atom_res["N_ua"])):
                    pre1 = uniq_atom_res["uniq_sym"]
                    at_pr1 = pre1[iat]
                    NFC_res = NFC(at_pr1)
                    com_f.write(" NewNCore " + at_pr1 + " " + str(NFC_res) +" end\n")
        com_f.write("end\n")
        if optfreq == "true":
            com_f.write("%geom\n")
            if values["MGGA"] == "true":
                com_f.write(" Calc_Hess true; NumHess true\n")
            else:
                com_f.write(" Calc_Hess true\n")
            com_f.write(" Recalc_Hess " + str(values["iterhess"]) +" \n") ## revisit !!!! CHECK!!! IMPORTANT
            com_f.write("end\n")
            com_f.write("%freq Temp 273.15, 298.15\n")
            com_f.write("end\n")
            # NOTE(review): the "%basis" header is only written when optfreq
            # is "true", yet the basis entries and trailing "end" below are
            # emitted whenever custombasis is "true" - confirm intended.
            if custombasis == "true":
                com_f.write("%basis \n")
        if custombasis == "true":
            # Print the custom basis for each unique element from file `basis`.
            uniq_atom_res = uniqatoms(sym)
            fname = basis
            if Nat == 1:
                orca_printbas(fname, sym[0])
            else:
                for iat1 in range(int(uniq_atom_res["N_ua"])):
                    orca_printbas(fname, uniq_atom_res["uniq_sym"][iat1]) # GTBAS1 C
        # NOTE(review): input.com is reopened in append mode while the outer
        # write handle is still open; correct ordering of the trailing "end"
        # depends on the outer buffer's flush timing - verify on target
        # platform.
        with open("input.com", "a") as com_f:
            com_f.write("end\n")
    # Run ORCA and accumulate input/output into the session logs.
    os.system(values["orca_exe"] + " input.com > input.out")
    os.system("cat input.com >> ORCA.inp")
    os.system("cat input.out >> ORCA.out")
    #os.system("rm -f input*")
####### orca_run - E
|
normal
|
{
"blob_id": "019e8d7159fe07adc245e6476ac1fed5e9c457b5",
"index": 3035,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef orca_run(method, basis, optfreq, custombasis, correlated, values,\n charge, multip, sym, R_coord):\n \"\"\"\n Runs orca\n\n Parameters:\n method (char) : Name of functional to be used\n basis (char) : Basis set name\n optfreq (char) : true/false value of the optfreq keyword \n custombasis (char) : true/false value of the custombasis keyword\n correlated (char) : true/false value of the correlated keyword \n values (dict): Values of the control variables \n\n \"\"\"\n with open('input.com', 'w') as com_f:\n if optfreq == 'true':\n if values['verticalIP'] != 'true' or values['IPss'] != 'true':\n if values['MGGA'] == 'true':\n Freqstr = 'NumFreq'\n else:\n Freqstr = 'Freq'\n if custombasis == 'true':\n com_f.write('! ' + str(method) + ' ' + values[\n 'String_Opt'] + ' ' + Freqstr + ' \\n')\n else:\n com_f.write('! ' + str(method) + ' ' + str(basis) +\n ' ' + values['String_Opt'] + ' ' + Freqstr + ' \\n')\n elif custombasis == 'true':\n com_f.write('! ' + str(method) + ' ' + Freqstr + ' \\n')\n else:\n com_f.write('! ' + str(method) + ' ' + str(basis) + ' ' +\n Freqstr + ' \\n')\n elif custombasis == 'true':\n com_f.write('! ' + str(method) + ' \\n')\n else:\n com_f.write('! 
' + str(method) + ' ' + str(basis) + ' \\n')\n Nat = len(sym)\n com_f.write('*xyz ' + str(charge) + ' ' + str(multip) + '\\n')\n for tmp in range(Nat):\n R_x = float(R_coord[tmp][0])\n R_y = float(R_coord[tmp][1])\n R_z = float(R_coord[tmp][2])\n com_f.write(' {:2s}{:15.8f}{:15.8f}{:15.8f}\\n'.format(sym[tmp],\n R_x, R_y, R_z))\n com_f.write('*\\n')\n com_f.write('%MaxCore ' + values['maxcore_mb'] + '\\n')\n com_f.write('%scf\\n MaxIter 500 \\n')\n com_f.write(' Convergence ' + values['conv_scf'] + '\\n')\n com_f.write('end\\n')\n if values['switch_guess'] == 'true':\n if values['guess_TM'] == 'true' and values['G4MP2TM']:\n com_f.write(' Guess = ' + values['option_guess'] + '\\n')\n com_f.write('end\\n')\n if values['switch_load_rel_file'] == 'true':\n f1 = open('rel_file.txt', 'r')\n com_f.write(f1.read())\n f1.close()\n with open('Thermochemistry.out', 'a') as ther_chem:\n ther_chem.write('check if rel_file.txt exists!!')\n if values['SCFDIIS'] == 'true':\n com_f.write('%scf\\n DIISMaxEq 15\\n')\n com_f.write(' directresetfreq 1\\n')\n com_f.write('end\\n')\n if values['LSHIFT'] == 'true':\n com_f.write('%scf\\n')\n com_f.write(' Shift Shift 0.1 ErrOff 0.1 end\\n')\n com_f.write('end\\n')\n if values['SOSCF'] == 'true':\n com_f.write('%scf\\n')\n com_f.write(' soscfmaxit 12\\n')\n com_f.write(' directresetfreq 1\\n')\n com_f.write('end\\n')\n if values['switch_DLPNO_CCSDT'] == 'true':\n com_f.write('%mdci\\n')\n com_f.write(' UseFullLMP2Guess true\\n')\n com_f.write(' TcutDOPre = ' + str(values['TcutDOPre']) + '\\n')\n com_f.write('end\\n')\n if float(values['Ntotale']) <= float(values['nproc']) or float(values\n ['Ntotale']) - float(values['Ntotalecore']) < float(values['nproc']\n ):\n com_f.write('%pal nprocs 1 \\n')\n else:\n com_f.write('%pal nprocs ' + values['nproc'] + ' \\n')\n com_f.write('end\\n')\n com_f.write('%method\\n')\n com_f.write(' IntAcc 7.0\\n')\n if values['optdiis'] == 'true':\n com_f.write(' Z_solver DIIS\\n')\n com_f.write(' Z_MaxIter 
300\\n')\n if correlated == 'true':\n uniq_atom_res = uniqatoms(sym)\n if values['ALLELE'] == 'true':\n for iat in range(int(uniq_atom_res['N_ua'])):\n pre1 = uniq_atom_res['uniq_sym']\n at_pr1 = pre1[iat]\n com_f.write(' NewNCore ' + at_pr1 + ' ' + ' 0 end\\n')\n else:\n for iat in range(int(uniq_atom_res['N_ua'])):\n pre1 = uniq_atom_res['uniq_sym']\n at_pr1 = pre1[iat]\n NFC_res = NFC(at_pr1)\n com_f.write(' NewNCore ' + at_pr1 + ' ' + str(NFC_res\n ) + ' end\\n')\n com_f.write('end\\n')\n if optfreq == 'true':\n com_f.write('%geom\\n')\n if values['MGGA'] == 'true':\n com_f.write(' Calc_Hess true; NumHess true\\n')\n else:\n com_f.write(' Calc_Hess true\\n')\n com_f.write(' Recalc_Hess ' + str(values['iterhess']) + ' \\n')\n com_f.write('end\\n')\n com_f.write('%freq Temp 273.15, 298.15\\n')\n com_f.write('end\\n')\n if custombasis == 'true':\n com_f.write('%basis \\n')\n if custombasis == 'true':\n uniq_atom_res = uniqatoms(sym)\n fname = basis\n if Nat == 1:\n orca_printbas(fname, sym[0])\n else:\n for iat1 in range(int(uniq_atom_res['N_ua'])):\n orca_printbas(fname, uniq_atom_res['uniq_sym'][iat1])\n with open('input.com', 'a') as com_f:\n com_f.write('end\\n')\n os.system(values['orca_exe'] + ' input.com > input.out')\n os.system('cat input.com >> ORCA.inp')\n os.system('cat input.out >> ORCA.out')\n",
"step-3": "import os, sys, string\nimport linecache, math\nimport numpy as np\nimport datetime, time\nfrom pople import NFC\nfrom pople import uniqatoms\nfrom pople import orca_printbas\n\n\ndef orca_run(method, basis, optfreq, custombasis, correlated, values,\n charge, multip, sym, R_coord):\n \"\"\"\n Runs orca\n\n Parameters:\n method (char) : Name of functional to be used\n basis (char) : Basis set name\n optfreq (char) : true/false value of the optfreq keyword \n custombasis (char) : true/false value of the custombasis keyword\n correlated (char) : true/false value of the correlated keyword \n values (dict): Values of the control variables \n\n \"\"\"\n with open('input.com', 'w') as com_f:\n if optfreq == 'true':\n if values['verticalIP'] != 'true' or values['IPss'] != 'true':\n if values['MGGA'] == 'true':\n Freqstr = 'NumFreq'\n else:\n Freqstr = 'Freq'\n if custombasis == 'true':\n com_f.write('! ' + str(method) + ' ' + values[\n 'String_Opt'] + ' ' + Freqstr + ' \\n')\n else:\n com_f.write('! ' + str(method) + ' ' + str(basis) +\n ' ' + values['String_Opt'] + ' ' + Freqstr + ' \\n')\n elif custombasis == 'true':\n com_f.write('! ' + str(method) + ' ' + Freqstr + ' \\n')\n else:\n com_f.write('! ' + str(method) + ' ' + str(basis) + ' ' +\n Freqstr + ' \\n')\n elif custombasis == 'true':\n com_f.write('! ' + str(method) + ' \\n')\n else:\n com_f.write('! 
' + str(method) + ' ' + str(basis) + ' \\n')\n Nat = len(sym)\n com_f.write('*xyz ' + str(charge) + ' ' + str(multip) + '\\n')\n for tmp in range(Nat):\n R_x = float(R_coord[tmp][0])\n R_y = float(R_coord[tmp][1])\n R_z = float(R_coord[tmp][2])\n com_f.write(' {:2s}{:15.8f}{:15.8f}{:15.8f}\\n'.format(sym[tmp],\n R_x, R_y, R_z))\n com_f.write('*\\n')\n com_f.write('%MaxCore ' + values['maxcore_mb'] + '\\n')\n com_f.write('%scf\\n MaxIter 500 \\n')\n com_f.write(' Convergence ' + values['conv_scf'] + '\\n')\n com_f.write('end\\n')\n if values['switch_guess'] == 'true':\n if values['guess_TM'] == 'true' and values['G4MP2TM']:\n com_f.write(' Guess = ' + values['option_guess'] + '\\n')\n com_f.write('end\\n')\n if values['switch_load_rel_file'] == 'true':\n f1 = open('rel_file.txt', 'r')\n com_f.write(f1.read())\n f1.close()\n with open('Thermochemistry.out', 'a') as ther_chem:\n ther_chem.write('check if rel_file.txt exists!!')\n if values['SCFDIIS'] == 'true':\n com_f.write('%scf\\n DIISMaxEq 15\\n')\n com_f.write(' directresetfreq 1\\n')\n com_f.write('end\\n')\n if values['LSHIFT'] == 'true':\n com_f.write('%scf\\n')\n com_f.write(' Shift Shift 0.1 ErrOff 0.1 end\\n')\n com_f.write('end\\n')\n if values['SOSCF'] == 'true':\n com_f.write('%scf\\n')\n com_f.write(' soscfmaxit 12\\n')\n com_f.write(' directresetfreq 1\\n')\n com_f.write('end\\n')\n if values['switch_DLPNO_CCSDT'] == 'true':\n com_f.write('%mdci\\n')\n com_f.write(' UseFullLMP2Guess true\\n')\n com_f.write(' TcutDOPre = ' + str(values['TcutDOPre']) + '\\n')\n com_f.write('end\\n')\n if float(values['Ntotale']) <= float(values['nproc']) or float(values\n ['Ntotale']) - float(values['Ntotalecore']) < float(values['nproc']\n ):\n com_f.write('%pal nprocs 1 \\n')\n else:\n com_f.write('%pal nprocs ' + values['nproc'] + ' \\n')\n com_f.write('end\\n')\n com_f.write('%method\\n')\n com_f.write(' IntAcc 7.0\\n')\n if values['optdiis'] == 'true':\n com_f.write(' Z_solver DIIS\\n')\n com_f.write(' Z_MaxIter 
300\\n')\n if correlated == 'true':\n uniq_atom_res = uniqatoms(sym)\n if values['ALLELE'] == 'true':\n for iat in range(int(uniq_atom_res['N_ua'])):\n pre1 = uniq_atom_res['uniq_sym']\n at_pr1 = pre1[iat]\n com_f.write(' NewNCore ' + at_pr1 + ' ' + ' 0 end\\n')\n else:\n for iat in range(int(uniq_atom_res['N_ua'])):\n pre1 = uniq_atom_res['uniq_sym']\n at_pr1 = pre1[iat]\n NFC_res = NFC(at_pr1)\n com_f.write(' NewNCore ' + at_pr1 + ' ' + str(NFC_res\n ) + ' end\\n')\n com_f.write('end\\n')\n if optfreq == 'true':\n com_f.write('%geom\\n')\n if values['MGGA'] == 'true':\n com_f.write(' Calc_Hess true; NumHess true\\n')\n else:\n com_f.write(' Calc_Hess true\\n')\n com_f.write(' Recalc_Hess ' + str(values['iterhess']) + ' \\n')\n com_f.write('end\\n')\n com_f.write('%freq Temp 273.15, 298.15\\n')\n com_f.write('end\\n')\n if custombasis == 'true':\n com_f.write('%basis \\n')\n if custombasis == 'true':\n uniq_atom_res = uniqatoms(sym)\n fname = basis\n if Nat == 1:\n orca_printbas(fname, sym[0])\n else:\n for iat1 in range(int(uniq_atom_res['N_ua'])):\n orca_printbas(fname, uniq_atom_res['uniq_sym'][iat1])\n with open('input.com', 'a') as com_f:\n com_f.write('end\\n')\n os.system(values['orca_exe'] + ' input.com > input.out')\n os.system('cat input.com >> ORCA.inp')\n os.system('cat input.out >> ORCA.out')\n",
"step-4": "import os, sys, string\nimport linecache, math\nimport numpy as np\nimport datetime , time\n\n\nfrom pople import NFC\nfrom pople import uniqatoms\nfrom pople import orca_printbas\n\n\n####### orca_run - S\ndef orca_run(method, basis,optfreq,custombasis, correlated, values, charge, multip, sym, R_coord):\n \"\"\"\n Runs orca\n\n Parameters:\n method (char) : Name of functional to be used\n basis (char) : Basis set name\n optfreq (char) : true/false value of the optfreq keyword \n custombasis (char) : true/false value of the custombasis keyword\n correlated (char) : true/false value of the correlated keyword \n values (dict): Values of the control variables \n\n \"\"\"\n with open(\"input.com\", \"w\") as com_f:\n if optfreq == \"true\":\n if values[\"verticalIP\"] != \"true\" or values[\"IPss\"] != \"true\": # IPss not defined\n if values[\"MGGA\"] == \"true\":\n Freqstr=\"NumFreq\"\n else:\n Freqstr=\"Freq\"\n \n if custombasis == \"true\":\n com_f.write(\"! \" +str(method) + \" \" + values[\"String_Opt\"] + \" \" + Freqstr + \" \\n\")\n else:\n com_f.write(\"! \" +str(method) + \" \" + str(basis) +\" \"+values[\"String_Opt\"] + \" \" + Freqstr + \" \\n\")\n else:\n if custombasis == \"true\":\n com_f.write(\"! \" +str(method) + \" \" + Freqstr + \" \\n\")\n else:\n com_f.write(\"! \" +str(method) + \" \" + str(basis) + \" \" + Freqstr + \" \\n\")\n else:\n if custombasis == \"true\":\n com_f.write(\"! \" +str(method) + \" \\n\")\n else:\n com_f.write(\"! 
\" +str(method) + \" \" + str(basis) + \" \\n\")\n\n Nat=len(sym)\n com_f.write(\"*xyz \"+str(charge)+\" \"+str(multip) + \"\\n\")\n for tmp in range(Nat):\n R_x=float(R_coord[tmp][0])\n R_y=float(R_coord[tmp][1])\n R_z=float(R_coord[tmp][2])\n com_f.write(' {:2s}{:15.8f}{:15.8f}{:15.8f}\\n'.format(sym[tmp],R_x,R_y,R_z)) \n com_f.write(\"*\\n\")\n\n com_f.write(\"%MaxCore \" + values[\"maxcore_mb\"] + \"\\n\")\n com_f.write(\"%scf\\n MaxIter 500 \\n\")\n com_f.write(\" Convergence \" + values[\"conv_scf\"] + \"\\n\")\n com_f.write(\"end\\n\")\n\n if values[\"switch_guess\"] == \"true\": ### this is not part of the inp file!!!\n if values[\"guess_TM\"] == \"true\" and values[\"G4MP2TM\"]:\n com_f.write(\" Guess = \" + values[\"option_guess\"] + \"\\n\")\n com_f.write(\"end\\n\")\n \n if values[\"switch_load_rel_file\"] == \"true\":\n f1 = open(\"rel_file.txt\", \"r\")\n com_f.write(f1.read())\n f1.close()\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"check if rel_file.txt exists!!\")\n\n if values[\"SCFDIIS\"] == \"true\":\n com_f.write(\"%scf\\n DIISMaxEq 15\\n\")\n com_f.write(\" directresetfreq 1\\n\")\n com_f.write(\"end\\n\")\n\n if values[\"LSHIFT\"] == \"true\":\n com_f.write(\"%scf\\n\")\n com_f.write(\" Shift Shift 0.1 ErrOff 0.1 end\\n\")\n com_f.write(\"end\\n\")\n\n if values[\"SOSCF\"] == \"true\":\n com_f.write(\"%scf\\n\")\n com_f.write(\" soscfmaxit 12\\n\")\n com_f.write(\" directresetfreq 1\\n\")\n com_f.write(\"end\\n\")\n\n if values[\"switch_DLPNO_CCSDT\"] == \"true\":\n com_f.write(\"%mdci\\n\")\n com_f.write(\" UseFullLMP2Guess true\\n\")\n com_f.write(\" TcutDOPre = \" + str(values[\"TcutDOPre\"]) +\"\\n\") #TODO Is this really needed?\n com_f.write(\"end\\n\")\n\n if ( float(values[\"Ntotale\"]) <= float(values[\"nproc\"]) ) or ( (float(values[\"Ntotale\"])-float(values[\"Ntotalecore\"])) < float(values[\"nproc\"]) ):\n com_f.write(\"%pal nprocs 1 \\n\")\n else:\n com_f.write(\"%pal nprocs 
\"+values[\"nproc\"]+\" \\n\")\n com_f.write(\"end\\n\")\n\n com_f.write(\"%method\\n\") ## CHECK\n com_f.write(\" IntAcc 7.0\\n\")\n \n if values[\"optdiis\"] == \"true\":\n com_f.write(\" Z_solver DIIS\\n\")\n com_f.write(\" Z_MaxIter 300\\n\")\n \n if correlated == \"true\":\n uniq_atom_res = uniqatoms(sym)\n if values[\"ALLELE\"] == \"true\": ### CHECK!!!!\n for iat in range(int(uniq_atom_res[\"N_ua\"])):\n pre1 = uniq_atom_res[\"uniq_sym\"]\n at_pr1 = pre1[iat]\n com_f.write(\" NewNCore \" + at_pr1 + \" \" + \" 0 end\\n\")\n else:\n for iat in range(int(uniq_atom_res[\"N_ua\"])):\n pre1 = uniq_atom_res[\"uniq_sym\"]\n at_pr1 = pre1[iat]\n NFC_res = NFC(at_pr1)\n com_f.write(\" NewNCore \" + at_pr1 + \" \" + str(NFC_res) +\" end\\n\")\n \n com_f.write(\"end\\n\")\n \n if optfreq == \"true\":\n com_f.write(\"%geom\\n\")\n if values[\"MGGA\"] == \"true\":\n com_f.write(\" Calc_Hess true; NumHess true\\n\")\n else:\n com_f.write(\" Calc_Hess true\\n\")\n com_f.write(\" Recalc_Hess \" + str(values[\"iterhess\"]) +\" \\n\") ## revisit !!!! CHECK!!! IMPORTANT\n com_f.write(\"end\\n\")\n com_f.write(\"%freq Temp 273.15, 298.15\\n\")\n com_f.write(\"end\\n\")\n if custombasis == \"true\":\n com_f.write(\"%basis \\n\")\n \n if custombasis == \"true\":\n uniq_atom_res = uniqatoms(sym)\n fname = basis\n if Nat == 1: \n orca_printbas(fname, sym[0]) \n else:\n for iat1 in range(int(uniq_atom_res[\"N_ua\"])):\n orca_printbas(fname, uniq_atom_res[\"uniq_sym\"][iat1]) # GTBAS1 C \n with open(\"input.com\", \"a\") as com_f:\n com_f.write(\"end\\n\")\n\n os.system(values[\"orca_exe\"] + \" input.com > input.out\")\n os.system(\"cat input.com >> ORCA.inp\")\n os.system(\"cat input.out >> ORCA.out\")\n #os.system(\"rm -f input*\")\n####### orca_run - E\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class UserUpdateAPIView(UpdateAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def post(self, request, format=None):
data = request
queryset = User.objects.get()
class UserTokenVerifyAPIView(APIView):
permission_classes = [AllowAny]
def post(self, request, *args, **kwargs):
if 'token' in request.data and request.data['token']:
token_key = request.data['token']
conf_token = Token.objects.filter(key=token_key)
if conf_token:
confirmed_user = conf_token.first().user.userprofile
if not confirmed_user.is_authenticated:
confirmed_user.is_authenticated = True
confirmed_user.save()
return Response({'data': 'Success'}, status=HTTP_200_OK)
return Response({'error': 'User not found'}, status=
HTTP_400_BAD_REQUEST)
class UserResetPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if 'data' not in request.data or 'email' not in request.data['data']:
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')
).hexdigest()
self.send_pwreset(email, token)
pwr_token = PasswordReset.objects.get_or_create(email=email, token=
token)
return Response({}, status=HTTP_200_OK)
def send_pwreset(self, email, token):
subject = 'Password reset instructions'
body = (
"""Follow these steps to reset your password. {0}
If you did not request for your password to be reset, please ignore this email."""
.format('http://127.0.0.1:3000/reset-password/{}'.format(token)))
from_email = 'from@email.com'
to_email = email
send_mail(subject, body, from_email, to_email)
class UserNewPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if not self.validate_request(validated_data):
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
newpw = validated_data['data']['password']
token = validated_data['data']['token']
pwreset_token = PasswordReset.objects.filter(token=token, consumed=
False, email=email)
if pwreset_token:
user = User.objects.get(email=email)
user.set_password(newpw)
user.save()
pwreset = pwreset_token[0]
pwreset.consumed = True
pwreset.save()
return Response({}, status=HTTP_200_OK)
return Response({}, status=HTTP_400_BAD_REQUEST)
def validate_request(self, data):
if 'data' not in data or 'email' not in data['data'
] or 'passwordconf' not in data['data'] or 'password' not in data[
'data'] or 'token' not in data['data'] or data['data']['password'
] != data['data']['passwordconf'] or not data['data']['token'
] or not data['data']['email']:
return False
else:
return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserUpdateAPIView(UpdateAPIView):
serializer_class = UserUpdateSerializer
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
data = request
queryset = User.objects.get()
class UserTokenVerifyAPIView(APIView):
permission_classes = [AllowAny]
def post(self, request, *args, **kwargs):
if 'token' in request.data and request.data['token']:
token_key = request.data['token']
conf_token = Token.objects.filter(key=token_key)
if conf_token:
confirmed_user = conf_token.first().user.userprofile
if not confirmed_user.is_authenticated:
confirmed_user.is_authenticated = True
confirmed_user.save()
return Response({'data': 'Success'}, status=HTTP_200_OK)
return Response({'error': 'User not found'}, status=
HTTP_400_BAD_REQUEST)
class UserResetPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if 'data' not in request.data or 'email' not in request.data['data']:
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')
).hexdigest()
self.send_pwreset(email, token)
pwr_token = PasswordReset.objects.get_or_create(email=email, token=
token)
return Response({}, status=HTTP_200_OK)
def send_pwreset(self, email, token):
subject = 'Password reset instructions'
body = (
"""Follow these steps to reset your password. {0}
If you did not request for your password to be reset, please ignore this email."""
.format('http://127.0.0.1:3000/reset-password/{}'.format(token)))
from_email = 'from@email.com'
to_email = email
send_mail(subject, body, from_email, to_email)
class UserNewPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if not self.validate_request(validated_data):
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
newpw = validated_data['data']['password']
token = validated_data['data']['token']
pwreset_token = PasswordReset.objects.filter(token=token, consumed=
False, email=email)
if pwreset_token:
user = User.objects.get(email=email)
user.set_password(newpw)
user.save()
pwreset = pwreset_token[0]
pwreset.consumed = True
pwreset.save()
return Response({}, status=HTTP_200_OK)
return Response({}, status=HTTP_400_BAD_REQUEST)
def validate_request(self, data):
if 'data' not in data or 'email' not in data['data'
] or 'passwordconf' not in data['data'] or 'password' not in data[
'data'] or 'token' not in data['data'] or data['data']['password'
] != data['data']['passwordconf'] or not data['data']['token'
] or not data['data']['email']:
return False
else:
return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserCreateAPIView(CreateAPIView):
<|reserved_special_token_0|>
def post(self, request, *args, **kwargs):
validated_data = request.data.get('user')
serializer = UserCreateSerializer(data=validated_data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=HTTP_200_OK)
else:
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
class UserUpdateAPIView(UpdateAPIView):
serializer_class = UserUpdateSerializer
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
data = request
queryset = User.objects.get()
class UserTokenVerifyAPIView(APIView):
permission_classes = [AllowAny]
def post(self, request, *args, **kwargs):
if 'token' in request.data and request.data['token']:
token_key = request.data['token']
conf_token = Token.objects.filter(key=token_key)
if conf_token:
confirmed_user = conf_token.first().user.userprofile
if not confirmed_user.is_authenticated:
confirmed_user.is_authenticated = True
confirmed_user.save()
return Response({'data': 'Success'}, status=HTTP_200_OK)
return Response({'error': 'User not found'}, status=
HTTP_400_BAD_REQUEST)
class UserResetPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if 'data' not in request.data or 'email' not in request.data['data']:
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')
).hexdigest()
self.send_pwreset(email, token)
pwr_token = PasswordReset.objects.get_or_create(email=email, token=
token)
return Response({}, status=HTTP_200_OK)
def send_pwreset(self, email, token):
subject = 'Password reset instructions'
body = (
"""Follow these steps to reset your password. {0}
If you did not request for your password to be reset, please ignore this email."""
.format('http://127.0.0.1:3000/reset-password/{}'.format(token)))
from_email = 'from@email.com'
to_email = email
send_mail(subject, body, from_email, to_email)
class UserNewPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if not self.validate_request(validated_data):
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
newpw = validated_data['data']['password']
token = validated_data['data']['token']
pwreset_token = PasswordReset.objects.filter(token=token, consumed=
False, email=email)
if pwreset_token:
user = User.objects.get(email=email)
user.set_password(newpw)
user.save()
pwreset = pwreset_token[0]
pwreset.consumed = True
pwreset.save()
return Response({}, status=HTTP_200_OK)
return Response({}, status=HTTP_400_BAD_REQUEST)
def validate_request(self, data):
if 'data' not in data or 'email' not in data['data'
] or 'passwordconf' not in data['data'] or 'password' not in data[
'data'] or 'token' not in data['data'] or data['data']['password'
] != data['data']['passwordconf'] or not data['data']['token'
] or not data['data']['email']:
return False
else:
return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
User = get_user_model()
class UserCreateAPIView(CreateAPIView):
serializer_class = UserCreateSerializer
def post(self, request, *args, **kwargs):
validated_data = request.data.get('user')
serializer = UserCreateSerializer(data=validated_data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=HTTP_200_OK)
else:
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
class UserUpdateAPIView(UpdateAPIView):
serializer_class = UserUpdateSerializer
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
data = request
queryset = User.objects.get()
class UserTokenVerifyAPIView(APIView):
permission_classes = [AllowAny]
def post(self, request, *args, **kwargs):
if 'token' in request.data and request.data['token']:
token_key = request.data['token']
conf_token = Token.objects.filter(key=token_key)
if conf_token:
confirmed_user = conf_token.first().user.userprofile
if not confirmed_user.is_authenticated:
confirmed_user.is_authenticated = True
confirmed_user.save()
return Response({'data': 'Success'}, status=HTTP_200_OK)
return Response({'error': 'User not found'}, status=
HTTP_400_BAD_REQUEST)
class UserResetPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if 'data' not in request.data or 'email' not in request.data['data']:
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')
).hexdigest()
self.send_pwreset(email, token)
pwr_token = PasswordReset.objects.get_or_create(email=email, token=
token)
return Response({}, status=HTTP_200_OK)
def send_pwreset(self, email, token):
subject = 'Password reset instructions'
body = (
"""Follow these steps to reset your password. {0}
If you did not request for your password to be reset, please ignore this email."""
.format('http://127.0.0.1:3000/reset-password/{}'.format(token)))
from_email = 'from@email.com'
to_email = email
send_mail(subject, body, from_email, to_email)
class UserNewPasswordAPIView(APIView):
def post(self, request, *args, **kwargs):
validated_data = request.data
if not self.validate_request(validated_data):
return Response({}, status=HTTP_400_BAD_REQUEST)
email = validated_data['data']['email']
newpw = validated_data['data']['password']
token = validated_data['data']['token']
pwreset_token = PasswordReset.objects.filter(token=token, consumed=
False, email=email)
if pwreset_token:
user = User.objects.get(email=email)
user.set_password(newpw)
user.save()
pwreset = pwreset_token[0]
pwreset.consumed = True
pwreset.save()
return Response({}, status=HTTP_200_OK)
return Response({}, status=HTTP_400_BAD_REQUEST)
def validate_request(self, data):
if 'data' not in data or 'email' not in data['data'
] or 'passwordconf' not in data['data'] or 'password' not in data[
'data'] or 'token' not in data['data'] or data['data']['password'
] != data['data']['passwordconf'] or not data['data']['token'
] or not data['data']['email']:
return False
else:
return True
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.db.models import Q
from django.contrib.auth import get_user_model
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from rest_framework.views import APIView
from rest_framework.generics import (CreateAPIView, UpdateAPIView)
from rest_framework.permissions import (AllowAny,IsAuthenticated,IsAdminUser,IsAuthenticatedOrReadOnly)
from rest_framework.authtoken.models import Token
from userprofile.serializers.create_user import UserCreateSerializer
from userprofile.serializers.update_user import UserUpdateSerializer
from userprofile.mailtrap import send_mail
from pwreset.models import PasswordReset
import hashlib
import os
User = get_user_model()
class UserCreateAPIView(CreateAPIView):
    """Register a new user from the ``user`` payload of the request body."""

    serializer_class = UserCreateSerializer

    def post(self, request, *args, **kwargs):
        payload = request.data.get('user')
        serializer = UserCreateSerializer(data=payload)
        # is_valid(raise_exception=True) raises ValidationError on bad input,
        # so the error branch below is effectively unreachable; it is kept as
        # a defensive fallback.
        if not serializer.is_valid(raise_exception=True):
            return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=HTTP_200_OK)
class UserUpdateAPIView(UpdateAPIView):
    # NOTE(review): this view looks unfinished - see the stub notes in post().
    serializer_class = UserUpdateSerializer
    permission_classes = [IsAuthenticated]

    def post(self, request, format=None):
        # NOTE(review): stub - both locals are assigned and never used,
        # User.objects.get() has no filter (raises unless exactly one user
        # exists), and no Response is returned so the client receives None.
        # Confirm intended update logic before relying on this endpoint.
        data = request
        queryset = User.objects.get()
class UserTokenVerifyAPIView(APIView):
    """Confirms the user profile behind a submitted auth token."""
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        token_key = request.data.get('token')
        if token_key:
            match = Token.objects.filter(key=token_key).first()
            if match is not None:
                profile = match.user.userprofile
                # Only persist when the flag actually changes.
                if not profile.is_authenticated:
                    profile.is_authenticated = True
                    profile.save()
                return Response({'data': 'Success'}, status=HTTP_200_OK)
        return Response({'error': 'User not found'}, status=HTTP_400_BAD_REQUEST)
class UserResetPasswordAPIView(APIView):
    """Issues a password-reset token for an email address and mails the link."""

    def post(self, request, *args, **kwargs):
        body = request.data
        if 'data' not in body or 'email' not in body['data']:
            return Response({}, status=HTTP_400_BAD_REQUEST)
        email = body['data']['email']
        # Deterministic token: sha256 of the address plus the server-side salt.
        digest = hashlib.sha256((email + os.environ['SALT']).encode('utf-8')).hexdigest()
        self.send_pwreset(email, digest)
        PasswordReset.objects.get_or_create(email=email, token=digest)
        return Response({}, status=HTTP_200_OK)

    def send_pwreset(self, email, token):
        """Sends the reset email containing the tokenised link."""
        link = "http://127.0.0.1:3000/reset-password/{}".format(token)
        subject = "Password reset instructions"
        template = """Follow these steps to reset your password. {0} \n If you did not request for your password to be reset, please ignore this email."""
        send_mail(subject, template.format(link), 'from@email.com', email)
class UserNewPasswordAPIView(APIView):
    """Applies a new password when presented with a valid, unconsumed reset token."""

    def post(self, request, *args, **kwargs):
        body = request.data
        if not self.validate_request(body):
            return Response({}, status=HTTP_400_BAD_REQUEST)
        payload = body['data']
        reset = PasswordReset.objects.filter(
            token=payload['token'], consumed=False, email=payload['email']).first()
        if reset is None:
            return Response({}, status=HTTP_400_BAD_REQUEST)
        account = User.objects.get(email=payload['email'])
        account.set_password(payload['password'])
        account.save()
        # Burn the token so the same link cannot be replayed.
        reset.consumed = True
        reset.save()
        return Response({}, status=HTTP_200_OK)

    def validate_request(self, data):
        """Returns True only when the payload carries matching passwords and a non-empty token and email."""
        if 'data' not in data:
            return False
        payload = data['data']
        for key in ('email', 'passwordconf', 'password', 'token'):
            if key not in payload:
                return False
        if payload['password'] != payload['passwordconf']:
            return False
        return bool(payload['token']) and bool(payload['email'])
|
flexible
|
{
"blob_id": "18f355041a9982de56ad2eb51b665dd39a156f0a",
"index": 9638,
"step-1": "<mask token>\n\n\nclass UserUpdateAPIView(UpdateAPIView):\n <mask token>\n <mask token>\n\n def post(self, request, format=None):\n data = request\n queryset = User.objects.get()\n\n\nclass UserTokenVerifyAPIView(APIView):\n permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n if 'token' in request.data and request.data['token']:\n token_key = request.data['token']\n conf_token = Token.objects.filter(key=token_key)\n if conf_token:\n confirmed_user = conf_token.first().user.userprofile\n if not confirmed_user.is_authenticated:\n confirmed_user.is_authenticated = True\n confirmed_user.save()\n return Response({'data': 'Success'}, status=HTTP_200_OK)\n return Response({'error': 'User not found'}, status=\n HTTP_400_BAD_REQUEST)\n\n\nclass UserResetPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if 'data' not in request.data or 'email' not in request.data['data']:\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')\n ).hexdigest()\n self.send_pwreset(email, token)\n pwr_token = PasswordReset.objects.get_or_create(email=email, token=\n token)\n return Response({}, status=HTTP_200_OK)\n\n def send_pwreset(self, email, token):\n subject = 'Password reset instructions'\n body = (\n \"\"\"Follow these steps to reset your password. 
{0} \n If you did not request for your password to be reset, please ignore this email.\"\"\"\n .format('http://127.0.0.1:3000/reset-password/{}'.format(token)))\n from_email = 'from@email.com'\n to_email = email\n send_mail(subject, body, from_email, to_email)\n\n\nclass UserNewPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if not self.validate_request(validated_data):\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n newpw = validated_data['data']['password']\n token = validated_data['data']['token']\n pwreset_token = PasswordReset.objects.filter(token=token, consumed=\n False, email=email)\n if pwreset_token:\n user = User.objects.get(email=email)\n user.set_password(newpw)\n user.save()\n pwreset = pwreset_token[0]\n pwreset.consumed = True\n pwreset.save()\n return Response({}, status=HTTP_200_OK)\n return Response({}, status=HTTP_400_BAD_REQUEST)\n\n def validate_request(self, data):\n if 'data' not in data or 'email' not in data['data'\n ] or 'passwordconf' not in data['data'] or 'password' not in data[\n 'data'] or 'token' not in data['data'] or data['data']['password'\n ] != data['data']['passwordconf'] or not data['data']['token'\n ] or not data['data']['email']:\n return False\n else:\n return True\n",
"step-2": "<mask token>\n\n\nclass UserUpdateAPIView(UpdateAPIView):\n serializer_class = UserUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def post(self, request, format=None):\n data = request\n queryset = User.objects.get()\n\n\nclass UserTokenVerifyAPIView(APIView):\n permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n if 'token' in request.data and request.data['token']:\n token_key = request.data['token']\n conf_token = Token.objects.filter(key=token_key)\n if conf_token:\n confirmed_user = conf_token.first().user.userprofile\n if not confirmed_user.is_authenticated:\n confirmed_user.is_authenticated = True\n confirmed_user.save()\n return Response({'data': 'Success'}, status=HTTP_200_OK)\n return Response({'error': 'User not found'}, status=\n HTTP_400_BAD_REQUEST)\n\n\nclass UserResetPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if 'data' not in request.data or 'email' not in request.data['data']:\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')\n ).hexdigest()\n self.send_pwreset(email, token)\n pwr_token = PasswordReset.objects.get_or_create(email=email, token=\n token)\n return Response({}, status=HTTP_200_OK)\n\n def send_pwreset(self, email, token):\n subject = 'Password reset instructions'\n body = (\n \"\"\"Follow these steps to reset your password. 
{0} \n If you did not request for your password to be reset, please ignore this email.\"\"\"\n .format('http://127.0.0.1:3000/reset-password/{}'.format(token)))\n from_email = 'from@email.com'\n to_email = email\n send_mail(subject, body, from_email, to_email)\n\n\nclass UserNewPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if not self.validate_request(validated_data):\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n newpw = validated_data['data']['password']\n token = validated_data['data']['token']\n pwreset_token = PasswordReset.objects.filter(token=token, consumed=\n False, email=email)\n if pwreset_token:\n user = User.objects.get(email=email)\n user.set_password(newpw)\n user.save()\n pwreset = pwreset_token[0]\n pwreset.consumed = True\n pwreset.save()\n return Response({}, status=HTTP_200_OK)\n return Response({}, status=HTTP_400_BAD_REQUEST)\n\n def validate_request(self, data):\n if 'data' not in data or 'email' not in data['data'\n ] or 'passwordconf' not in data['data'] or 'password' not in data[\n 'data'] or 'token' not in data['data'] or data['data']['password'\n ] != data['data']['passwordconf'] or not data['data']['token'\n ] or not data['data']['email']:\n return False\n else:\n return True\n",
"step-3": "<mask token>\n\n\nclass UserCreateAPIView(CreateAPIView):\n <mask token>\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data.get('user')\n serializer = UserCreateSerializer(data=validated_data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=HTTP_200_OK)\n else:\n return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)\n\n\nclass UserUpdateAPIView(UpdateAPIView):\n serializer_class = UserUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def post(self, request, format=None):\n data = request\n queryset = User.objects.get()\n\n\nclass UserTokenVerifyAPIView(APIView):\n permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n if 'token' in request.data and request.data['token']:\n token_key = request.data['token']\n conf_token = Token.objects.filter(key=token_key)\n if conf_token:\n confirmed_user = conf_token.first().user.userprofile\n if not confirmed_user.is_authenticated:\n confirmed_user.is_authenticated = True\n confirmed_user.save()\n return Response({'data': 'Success'}, status=HTTP_200_OK)\n return Response({'error': 'User not found'}, status=\n HTTP_400_BAD_REQUEST)\n\n\nclass UserResetPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if 'data' not in request.data or 'email' not in request.data['data']:\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')\n ).hexdigest()\n self.send_pwreset(email, token)\n pwr_token = PasswordReset.objects.get_or_create(email=email, token=\n token)\n return Response({}, status=HTTP_200_OK)\n\n def send_pwreset(self, email, token):\n subject = 'Password reset instructions'\n body = (\n \"\"\"Follow these steps to reset your password. 
{0} \n If you did not request for your password to be reset, please ignore this email.\"\"\"\n .format('http://127.0.0.1:3000/reset-password/{}'.format(token)))\n from_email = 'from@email.com'\n to_email = email\n send_mail(subject, body, from_email, to_email)\n\n\nclass UserNewPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if not self.validate_request(validated_data):\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n newpw = validated_data['data']['password']\n token = validated_data['data']['token']\n pwreset_token = PasswordReset.objects.filter(token=token, consumed=\n False, email=email)\n if pwreset_token:\n user = User.objects.get(email=email)\n user.set_password(newpw)\n user.save()\n pwreset = pwreset_token[0]\n pwreset.consumed = True\n pwreset.save()\n return Response({}, status=HTTP_200_OK)\n return Response({}, status=HTTP_400_BAD_REQUEST)\n\n def validate_request(self, data):\n if 'data' not in data or 'email' not in data['data'\n ] or 'passwordconf' not in data['data'] or 'password' not in data[\n 'data'] or 'token' not in data['data'] or data['data']['password'\n ] != data['data']['passwordconf'] or not data['data']['token'\n ] or not data['data']['email']:\n return False\n else:\n return True\n",
"step-4": "<mask token>\nUser = get_user_model()\n\n\nclass UserCreateAPIView(CreateAPIView):\n serializer_class = UserCreateSerializer\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data.get('user')\n serializer = UserCreateSerializer(data=validated_data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=HTTP_200_OK)\n else:\n return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)\n\n\nclass UserUpdateAPIView(UpdateAPIView):\n serializer_class = UserUpdateSerializer\n permission_classes = [IsAuthenticated]\n\n def post(self, request, format=None):\n data = request\n queryset = User.objects.get()\n\n\nclass UserTokenVerifyAPIView(APIView):\n permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n if 'token' in request.data and request.data['token']:\n token_key = request.data['token']\n conf_token = Token.objects.filter(key=token_key)\n if conf_token:\n confirmed_user = conf_token.first().user.userprofile\n if not confirmed_user.is_authenticated:\n confirmed_user.is_authenticated = True\n confirmed_user.save()\n return Response({'data': 'Success'}, status=HTTP_200_OK)\n return Response({'error': 'User not found'}, status=\n HTTP_400_BAD_REQUEST)\n\n\nclass UserResetPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if 'data' not in request.data or 'email' not in request.data['data']:\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n token = hashlib.sha256(bytes(email + os.environ['SALT'], 'utf-8')\n ).hexdigest()\n self.send_pwreset(email, token)\n pwr_token = PasswordReset.objects.get_or_create(email=email, token=\n token)\n return Response({}, status=HTTP_200_OK)\n\n def send_pwreset(self, email, token):\n subject = 'Password reset instructions'\n body = (\n \"\"\"Follow these steps to reset your password. 
{0} \n If you did not request for your password to be reset, please ignore this email.\"\"\"\n .format('http://127.0.0.1:3000/reset-password/{}'.format(token)))\n from_email = 'from@email.com'\n to_email = email\n send_mail(subject, body, from_email, to_email)\n\n\nclass UserNewPasswordAPIView(APIView):\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if not self.validate_request(validated_data):\n return Response({}, status=HTTP_400_BAD_REQUEST)\n email = validated_data['data']['email']\n newpw = validated_data['data']['password']\n token = validated_data['data']['token']\n pwreset_token = PasswordReset.objects.filter(token=token, consumed=\n False, email=email)\n if pwreset_token:\n user = User.objects.get(email=email)\n user.set_password(newpw)\n user.save()\n pwreset = pwreset_token[0]\n pwreset.consumed = True\n pwreset.save()\n return Response({}, status=HTTP_200_OK)\n return Response({}, status=HTTP_400_BAD_REQUEST)\n\n def validate_request(self, data):\n if 'data' not in data or 'email' not in data['data'\n ] or 'passwordconf' not in data['data'] or 'password' not in data[\n 'data'] or 'token' not in data['data'] or data['data']['password'\n ] != data['data']['passwordconf'] or not data['data']['token'\n ] or not data['data']['email']:\n return False\n else:\n return True\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render\nfrom django.db.models import Q\nfrom django.contrib.auth import get_user_model\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import (CreateAPIView, UpdateAPIView)\nfrom rest_framework.permissions import (AllowAny,IsAuthenticated,IsAdminUser,IsAuthenticatedOrReadOnly)\nfrom rest_framework.authtoken.models import Token\nfrom userprofile.serializers.create_user import UserCreateSerializer\nfrom userprofile.serializers.update_user import UserUpdateSerializer\nfrom userprofile.mailtrap import send_mail\nfrom pwreset.models import PasswordReset\nimport hashlib\nimport os\n\nUser = get_user_model()\n\nclass UserCreateAPIView(CreateAPIView):\n serializer_class = UserCreateSerializer\n\n def post(self, request, *args, **kwargs):\n validated_data = request.data.get('user')\n serializer = UserCreateSerializer(data=validated_data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=HTTP_200_OK)\n else:\n return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)\n\n\nclass UserUpdateAPIView(UpdateAPIView):\n serializer_class = UserUpdateSerializer\n permission_classes = [IsAuthenticated]\n def post(self, request, format=None):\n data = request\n queryset = User.objects.get()\n\n\nclass UserTokenVerifyAPIView(APIView):\n permission_classes = [AllowAny]\n def post(self, request, *args, **kwargs):\n if ('token' in request.data) and request.data['token']:\n token_key = request.data['token']\n conf_token = Token.objects.filter(key=token_key)\n if conf_token:\n confirmed_user = conf_token.first().user.userprofile\n if not confirmed_user.is_authenticated:\n confirmed_user.is_authenticated = True\n confirmed_user.save()\n return Response({'data': 'Success'}, 
status=HTTP_200_OK)\n return Response({'error': 'User not found'}, status=HTTP_400_BAD_REQUEST)\n\n\nclass UserResetPasswordAPIView(APIView):\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if ('data' not in request.data) or ('email' not in request.data['data']):\n return Response({}, status=HTTP_400_BAD_REQUEST)\n\n email=validated_data['data']['email']\n token = hashlib.sha256(bytes((email + os.environ['SALT']), 'utf-8')).hexdigest()\n\n self.send_pwreset(email, token)\n pwr_token = PasswordReset.objects.get_or_create(email=email, token=token)\n return Response({}, status=HTTP_200_OK)\n\n\n\n def send_pwreset(self, email, token):\n subject = \"Password reset instructions\"\n body = \"\"\"Follow these steps to reset your password. {0} \\n If you did not request for your password to be reset, please ignore this email.\"\"\".format(\"http://127.0.0.1:3000/reset-password/{}\".format(token))\n from_email = 'from@email.com'\n to_email = email\n\n send_mail(subject, body, from_email, to_email)\n\n\nclass UserNewPasswordAPIView(APIView):\n def post(self, request, *args, **kwargs):\n validated_data = request.data\n if not self.validate_request(validated_data):\n return Response({}, status=HTTP_400_BAD_REQUEST)\n\n email = validated_data['data']['email']\n newpw = validated_data['data']['password']\n token = validated_data['data']['token']\n pwreset_token = PasswordReset.objects.filter(token=token, consumed=False, email=email)\n if pwreset_token:\n user = User.objects.get(email=email)\n user.set_password(newpw)\n user.save()\n pwreset = pwreset_token[0]\n pwreset.consumed=True\n pwreset.save()\n return Response({}, status=HTTP_200_OK)\n return Response({}, status=HTTP_400_BAD_REQUEST)\n\n def validate_request(self, data):\n if ('data' not in data) or ('email' not in data['data']) or ('passwordconf' not \\\n in data['data']) or ('password' not in data['data']) or ('token' not in \\\n data['data']) or (data['data']['password'] != 
data['data']['passwordconf']) or \\\n (not data['data']['token']) or (not data['data']['email']):\n return False\n else:\n return True\n",
"step-ids": [
11,
12,
14,
16,
18
]
}
|
[
11,
12,
14,
16,
18
] |
<|reserved_special_token_0|>
class CourceCreateView(CreateView):
    """CreateView using CourceCreateForm; redirects to the course list on success."""
    template_name = 'cources/create_cource.html'
    form_class = CourceCreateForm
    success_url = reverse_lazy('cources:cource_list')
class CourceUpdateView(UpdateView):
    """UpdateView over Courses using CourceCreateForm; redirects to the course list."""
    model = Courses
    form_class = CourceCreateForm
    template_name = 'cources/course_update.html'
    success_url = reverse_lazy('cources:cource_list')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CourceListView(ListView):
    """ListView over Courses; the template receives them as 'cources'."""
    model = Courses
    template_name = 'cources/cource_list.html'
    context_object_name = 'cources'
class CourceCreateView(CreateView):
    """CreateView using CourceCreateForm; redirects to the course list on success."""
    template_name = 'cources/create_cource.html'
    form_class = CourceCreateForm
    success_url = reverse_lazy('cources:cource_list')
class CourceUpdateView(UpdateView):
    """UpdateView over Courses using CourceCreateForm; redirects to the course list."""
    model = Courses
    form_class = CourceCreateForm
    template_name = 'cources/course_update.html'
    success_url = reverse_lazy('cources:cource_list')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CourceListView(ListView):
    """ListView over Courses; the template receives them as 'cources'."""
    model = Courses
    template_name = 'cources/cource_list.html'
    context_object_name = 'cources'
class CourceCreateView(CreateView):
    """CreateView using CourceCreateForm; redirects to the course list on success."""
    template_name = 'cources/create_cource.html'
    form_class = CourceCreateForm
    success_url = reverse_lazy('cources:cource_list')
class CourceUpdateView(UpdateView):
    """UpdateView over Courses using CourceCreateForm; redirects to the course list."""
    model = Courses
    form_class = CourceCreateForm
    template_name = 'cources/course_update.html'
    success_url = reverse_lazy('cources:cource_list')
def DeleteView(request, pk):
    """Deletes the course(s) matching *pk* and redirects to the course list.

    NOTE(review): the name shadows the generic DeleteView class if that name
    is imported at module level — confirm and consider renaming (urls too).
    """
    cource = Courses.objects.filter(pk=pk)
    cource.delete()
    return redirect(reverse('cources:cource_list'))
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from .models import Courses
from django.views.generic import CreateView, ListView, UpdateView, DeleteView
from .forms import CourceCreateForm
from django.urls import reverse_lazy
from django.urls import reverse
class CourceListView(ListView):
    """Lists every course; the template sees them under the name 'cources'."""

    context_object_name = 'cources'
    template_name = 'cources/cource_list.html'
    model = Courses
class CourceCreateView(CreateView):
    """Shows the creation form and returns to the course list on success."""

    form_class = CourceCreateForm
    success_url = reverse_lazy('cources:cource_list')
    template_name = 'cources/create_cource.html'
class CourceUpdateView(UpdateView):
    """Edits an existing course through CourceCreateForm."""

    success_url = reverse_lazy('cources:cource_list')
    template_name = 'cources/course_update.html'
    form_class = CourceCreateForm
    model = Courses
def DeleteView(request, pk):
    """Deletes the course with the given pk, then returns to the course list.

    NOTE(review): this function shadows the imported generic DeleteView class —
    consider renaming (the url config would need updating too).
    """
    Courses.objects.filter(pk=pk).delete()
    return redirect(reverse('cources:cource_list'))
|
flexible
|
{
"blob_id": "3340277df91f1421dab8d204eddce65b4604432b",
"index": 369,
"step-1": "<mask token>\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CourceListView(ListView):\n model = Courses\n template_name = 'cources/cource_list.html'\n context_object_name = 'cources'\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CourceListView(ListView):\n model = Courses\n template_name = 'cources/cource_list.html'\n context_object_name = 'cources'\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\ndef DeleteView(request, pk):\n cource = Courses.objects.filter(pk=pk)\n cource.delete()\n return redirect(reverse('cources:cource_list'))\n",
"step-4": "from django.shortcuts import render, redirect\nfrom .models import Courses\nfrom django.views.generic import CreateView, ListView, UpdateView, DeleteView\nfrom .forms import CourceCreateForm\nfrom django.urls import reverse_lazy\nfrom django.urls import reverse\n\n\nclass CourceListView(ListView):\n model = Courses\n template_name = 'cources/cource_list.html'\n context_object_name = 'cources'\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\ndef DeleteView(request, pk):\n cource = Courses.objects.filter(pk=pk)\n cource.delete()\n return redirect(reverse('cources:cource_list'))\n",
"step-5": null,
"step-ids": [
4,
6,
7,
8
]
}
|
[
4,
6,
7,
8
] |
import requests
from SPARQLWrapper import SPARQLWrapper, JSON
from rdflib import Graph
from plenum.server.plugin.graphchain.graph_store import GraphStore
from plenum.server.plugin.graphchain.logger import get_debug_logger
logger = get_debug_logger()
class StardogGraphStore(GraphStore):
    """GraphStore implementation backed by a Stardog triple store.

    Talks to the store over its HTTP SPARQL endpoints, authenticating
    every request with the supplied user/password pair.
    """

    def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):
        """Stores the credentials and delegates endpoint setup to GraphStore."""
        super(StardogGraphStore, self).__init__(ts_db_name, ts_url)
        self._ts_user = ts_user
        self._ts_pass = ts_pass
        # Fixed duplicated word ("with with") in the original log message.
        msg = "Created a new StardogGraphStore with user equal to '{}' and URL equal to '{}'." \
            .format(ts_user, self._node_ts_url)
        logger.info(msg)

    def check_whether_db_exists(self):
        """Returns True when the configured database URL answers HTTP 200."""
        logger.debug("Checking whether a triple store with db '{}' exists...".format(self._node_ts_url))
        url = self._get_ts_db_url()
        r = requests.get(url, auth=(self._ts_user, self._ts_pass))
        status_code = r.status_code
        logger.debug("Status type of response whether db exists: {}.".format(status_code))
        return status_code == 200

    def _make_sparql_wrapper(self):
        """Builds a SPARQLWrapper bound to this store's query and update endpoints."""
        return SPARQLWrapper(
            self._get_sparql_endpoint_for_query(),
            self._get_sparql_endpoint_for_update())

    def add_graph(self, raw_graph, graph_format, graph_hash):
        """Parses *raw_graph* and INSERTs it as the named graph keyed by *graph_hash*."""
        logger.debug("Adding graph to the triple store with URL '{}'...".format(self._get_sparql_endpoint_for_update()))
        ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
        g = Graph()
        g.parse(data=raw_graph, format=graph_format)
        sparql_query = self._make_sparql_wrapper()
        # Re-serialise as N-Triples so the payload can be inlined in the update query.
        query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.serialize(format='nt').decode())
        sparql_query.setQuery(query)
        sparql_query.method = 'POST'
        sparql_query.setCredentials(self._ts_user, self._ts_pass)
        sparql_query.query()

    def check_if_graph_is_already_stored(self, graph_hash: str) -> bool:
        """ASKs the store whether the graph keyed by *graph_hash* is already present."""
        ihash = GraphStore.IHASH_PREFIX.format(graph_hash)
        logger.debug("Checking whether graph '{}' is already in the triple store...".format(ihash))
        query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)
        sparql_query = self._make_sparql_wrapper()
        sparql_query.setQuery(query)
        sparql_query.method = 'POST'
        sparql_query.setReturnFormat(JSON)
        sparql_query.setCredentials(self._ts_user, self._ts_pass)
        result = sparql_query.query()
        return result.convert()['boolean']
|
normal
|
{
"blob_id": "a42a94798d176e20646d41cf0f4b7e4f99e0790b",
"index": 105,
"step-1": "<mask token>\n\n\nclass StardogGraphStore(GraphStore):\n <mask token>\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n",
"step-2": "<mask token>\n\n\nclass StardogGraphStore(GraphStore):\n\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, self).__init__(ts_db_name, ts_url)\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n msg = (\n \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\"\n .format(ts_user, self._node_ts_url))\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n 
result = sparql_query.query()\n return result.convert()['boolean']\n",
"step-3": "<mask token>\nlogger = get_debug_logger()\n\n\nclass StardogGraphStore(GraphStore):\n\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, self).__init__(ts_db_name, ts_url)\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n msg = (\n \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\"\n .format(ts_user, self._node_ts_url))\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n 
sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n",
"step-4": "import requests\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom rdflib import Graph\nfrom plenum.server.plugin.graphchain.graph_store import GraphStore\nfrom plenum.server.plugin.graphchain.logger import get_debug_logger\nlogger = get_debug_logger()\n\n\nclass StardogGraphStore(GraphStore):\n\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, self).__init__(ts_db_name, ts_url)\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n msg = (\n \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\"\n .format(ts_user, self._node_ts_url))\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\"\n .format(self._node_ts_url))\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug('Status type of response whether db exists: {}.'.\n format(status_code))\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".\n format(self._get_sparql_endpoint_for_update()))\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n sparql_query = SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.\n serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) ->bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n logger.debug(\n \"Checking whether graph '{}' is already in the triple store...\"\n .format(ihash))\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n sparql_query = 
SPARQLWrapper(self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n",
"step-5": "import requests\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom rdflib import Graph\n\nfrom plenum.server.plugin.graphchain.graph_store import GraphStore\nfrom plenum.server.plugin.graphchain.logger import get_debug_logger\n\nlogger = get_debug_logger()\n\n\nclass StardogGraphStore(GraphStore):\n def __init__(self, ts_db_name, ts_url, ts_user, ts_pass):\n super(StardogGraphStore, self).__init__(ts_db_name, ts_url)\n\n self._ts_user = ts_user\n self._ts_pass = ts_pass\n\n msg = \"Created a new StardogGraphStore with with user equal to '{}' and URL equal to '{}'.\" \\\n .format(ts_user, self._node_ts_url)\n logger.info(msg)\n\n def check_whether_db_exists(self):\n logger.debug(\"Checking whether a triple store with db '{}' exists...\".format(self._node_ts_url))\n\n url = self._get_ts_db_url()\n r = requests.get(url, auth=(self._ts_user, self._ts_pass))\n status_code = r.status_code\n logger.debug(\"Status type of response whether db exists: {}.\".format(status_code))\n\n return status_code == 200\n\n def add_graph(self, raw_graph, graph_format, graph_hash):\n logger.debug(\"Adding graph to the triple store with URL '{}'...\".format(self._get_sparql_endpoint_for_update()))\n\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n\n g = Graph()\n g.parse(data=raw_graph, format=graph_format)\n\n sparql_query = SPARQLWrapper(\n self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n\n query = GraphStore.INSERT_GRAPH_QUERY_TEMPLATE.format(ihash, g.serialize(format='nt').decode())\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n sparql_query.query()\n\n def check_if_graph_is_already_stored(self, graph_hash: str) -> bool:\n ihash = GraphStore.IHASH_PREFIX.format(graph_hash)\n\n logger.debug(\"Checking whether graph '{}' is already in the triple store...\".format(ihash))\n\n query = GraphStore.ASK_IF_GRAPH_IS_ALREADY_STORED.format(ihash)\n\n sparql_query = 
SPARQLWrapper(\n self._get_sparql_endpoint_for_query(),\n self._get_sparql_endpoint_for_update())\n\n sparql_query.setQuery(query)\n sparql_query.method = 'POST'\n sparql_query.setReturnFormat(JSON)\n sparql_query.setCredentials(self._ts_user, self._ts_pass)\n result = sparql_query.query()\n return result.convert()['boolean']\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
class ListNode:
    """A search-tree entry: the path of node IDs taken so far plus its cost.

    Attributes:
        node_list: ordered list of node IDs on the path, ending with this node.
        g: accumulated path cost (stored as given, not coerced).
        f: total estimate ``int(g) + int(h)`` (A*-style evaluation value).
        ID: the ID of the node this entry represents (last path element).
    """

    def __init__(self, listt, node, g, h):
        # Copy the parent's path (so the caller's list is never mutated)
        # and extend it with this node.
        self.node_list = list(listt)
        self.node_list.append(node)
        self.g = g
        self.f = int(g) + int(h)
        self.ID = node

    def is_Goal(self, complete_nodes):
        """Return True if *complete_nodes* appears anywhere on this path."""
        return complete_nodes in self.node_list
|
normal
|
{
"blob_id": "2b796fb99e4607d310a533e8d9897100c4df087d",
"index": 2665,
"step-1": "<mask token>\n",
"step-2": "class ListNode:\n <mask token>\n <mask token>\n",
"step-3": "class ListNode:\n\n def __init__(self, listt, node, g, h):\n self.node_list = []\n for element in listt:\n self.node_list.append(element)\n self.node_list.append(node)\n self.g = g\n self.f = int(g) + int(h)\n self.ID = node\n <mask token>\n",
"step-4": "class ListNode:\n\n def __init__(self, listt, node, g, h):\n self.node_list = []\n for element in listt:\n self.node_list.append(element)\n self.node_list.append(node)\n self.g = g\n self.f = int(g) + int(h)\n self.ID = node\n\n def is_Goal(self, complete_nodes):\n if complete_nodes in self.node_list:\n return True\n return False\n",
"step-5": "class ListNode:\r\n def __init__(self,listt,node,g,h):\r\n self.node_list = []\r\n for element in listt:\r\n self.node_list.append(element)\r\n self.node_list.append(node)\r\n\r\n self.g=g\r\n self.f = int(g)+int(h);\r\n self.ID = node\r\n\r\n\r\n\r\n def is_Goal(self,complete_nodes):\r\n\r\n if complete_nodes in self.node_list:\r\n return True\r\n return False\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Upload a local JPEG to httpbin's echo endpoint and print the echoed response.
import urllib3

with open('python.jpg', 'rb') as img:
    payload = img.read()

pool = urllib3.PoolManager()
response = pool.request(
    'POST',
    'http://httpbin.org/post',
    body=payload,
    headers={'Content-Type': 'image/jpeg'},
)
print(response.data.decode())
|
normal
|
{
"blob_id": "98dbc6c3bdc3efb4310a2dbb7b1cc1c89eb4582b",
"index": 7354,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('python.jpg', 'rb') as f:\n data = f.read()\n<mask token>\nprint(r.data.decode())\n",
"step-3": "<mask token>\nwith open('python.jpg', 'rb') as f:\n data = f.read()\nhttp = urllib3.PoolManager()\nr = http.request('POST', 'http://httpbin.org/post', body=data, headers={\n 'Content-Type': 'image/jpeg'})\nprint(r.data.decode())\n",
"step-4": "import urllib3\nwith open('python.jpg', 'rb') as f:\n data = f.read()\nhttp = urllib3.PoolManager()\nr = http.request('POST', 'http://httpbin.org/post', body=data, headers={\n 'Content-Type': 'image/jpeg'})\nprint(r.data.decode())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.preprocessing import normalize
def blackbox_function(x, y=None, sim=False):
    """Objective value for the Bayesian optimization loop.

    In simulation mode an analytic parabola is returned; otherwise the most
    recent N170 latency magnitude is read from ``Output.txt`` (written by an
    external process).

    Args:
        x: first input variable.
        y: optional second input variable (used only when ``sim`` is True).
        sim: if True, compute the analytic value instead of reading the file.

    Returns:
        The simulated value, or the last line of ``Output.txt`` as a float.
    """
    if sim:
        if y is None:
            return -x ** 2 + 6
        else:
            return -(x + y) ** 2 + 6

    # Reading the magnitude of the N170 data.  The file is produced by an
    # external process, so it may be empty or partially written; in that
    # case prompt the operator and retry once the file is ready.
    filename = 'Output.txt'
    with open(filename) as f:
        lines = f.read().splitlines()
    try:
        latency = float(lines[-1])
    except ValueError:
        print('Failed to convert value to float')
        input("PRESS ENTER TO CONTINUE.")
        with open(filename) as f:
            lines = f.read().splitlines()
        latency = float(lines[-1])
    except IndexError:
        print('The latent file is empty')
        input("PRESS ENTER TO CONTINUE.")
        with open(filename) as f:
            lines = f.read().splitlines()
        latency = float(lines[-1])
    return latency
def obtain_confidence(sim=False):
    """Return the latest confidence level for the target value.

    In simulation mode a zero-mean Gaussian sample (sigma=0.60) is drawn;
    otherwise the last line of ``Confidence.txt`` (written by an external
    process) is read, prompting the operator to retry if the file is empty
    or malformed.

    Args:
        sim: if True, return simulated noise instead of reading the file.

    Returns:
        The confidence value as a float.
    """
    if sim:
        noise = np.random.normal(0, 0.60, size=1)[0]
        return noise

    # Reading the Confidence levels of the target value.  Handle a file
    # that is empty or mid-write by prompting the operator and retrying.
    filename = 'Confidence.txt'
    with open(filename) as f:
        lines = f.read().splitlines()
    try:
        confidence = float(lines[-1])
    except ValueError:
        print('Failed to convert confidence value to float')
        input("PRESS ENTER TO CONTINUE.")
        with open(filename) as f:
            lines = f.read().splitlines()
        confidence = float(lines[-1])
    except IndexError:
        print('The confidence file is empty')
        input("PRESS ENTER TO CONTINUE.")
        with open(filename) as f:
            lines = f.read().splitlines()
        confidence = float(lines[-1])
    return confidence
def posterior(optimizer, x_obs, y_obs, grid):
    """Fit the optimizer's Gaussian process on the observations and
    return its predictive mean and standard deviation over *grid*."""
    gp = optimizer._gp
    gp.fit(x_obs, y_obs)
    return gp.predict(grid, return_std=True)
def plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):
    """Plot the GP posterior (top) and acquisition/utility function (bottom).

    The figure is saved as ``<logpath>/fig_<i>``.

    Args:
        optimizer: BayesOpt-style object exposing ``.res`` (a list of
            ``{'params': {'x': ...}, 'target': ...}`` dicts) and a ``_gp``
            model used by :func:`posterior`.
        logpath: directory where the figure is written.
        i: step index used in the output filename.
        utility_function: object whose ``utility(x, gp, y_max)`` returns the
            acquisition values over ``x``.
        bounds: axis-limit mode flag; currently unused — all limit-setting
            code below is commented out.
        x: grid of query points — presumably shape (n, 1); TODO confirm
            against the caller.
        y: optional ground-truth values over ``x``, drawn as the target curve.
    """
    fig = plt.figure(figsize=(16, 10))
    steps = len(optimizer.res)
    fig.suptitle(
        'Gaussian Process and Utility Function After {} Steps'.format(steps),
        fontdict={'size': 30}
    )

    # Two stacked panels: large GP panel on top, smaller utility panel below.
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    axis = plt.subplot(gs[0])
    acq = plt.subplot(gs[1])

    # x_obs = np.array([[res["params"]["x"]] for res in optimizer.res])
    # y_obs = np.array([res["target"] for res in optimizer.res])

    # Collect the observed points from the optimizer history.
    x_obs = np.array([[res["params"]["x"]] for res in optimizer.res])
    y_obs = np.array([res["target"] for res in optimizer.res])

    # Normalize targets before fitting; `norm` is kept so the prediction
    # can be scaled back to the original units below.
    y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)
    y_obs = y_obs.flatten()

    mu, sigma = posterior(optimizer, x_obs, y_obs, x)
    # Utility is computed on the NORMALIZED scale (y_obs is still normalized
    # here) — NOTE(review): confirm this is intended.
    utility = utility_function.utility(x, optimizer._gp, y_obs.max())

    # Unnormalize data
    mu = mu*norm
    sigma = sigma*norm
    y_obs = y_obs*norm

    if y is not None:
        axis.plot(x, y, linewidth=3, label='Target')
    axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=u'Observations', color='r')
    axis.plot(x, mu, '--', color='k', label='Prediction')

    # Shade the 95% confidence band (mean +/- 1.96 sigma).
    axis.fill(np.concatenate([x, x[::-1]]),
              np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
              alpha=.6, fc='c', ec='None', label='95% confidence interval')
    # if(bounds == "large"):
    #     axis.set_xlim((-1, 1))
    # else:
    #     axis.set_xlim((0, 1))
    # axis.set_ylim((None, None))
    axis.set_ylabel('f(x)', fontdict={'size': 20})
    axis.set_xlabel('x', fontdict={'size': 20})

    # utility = utility_function.utility(x, optimizer._gp, 0)
    acq.plot(x, utility, label='Utility Function', color='purple')
    # Mark the acquisition maximum: the next point to sample.
    acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    # if (bounds == "large"):
    #     acq.set_xlim((-1, 1))
    # else:
    #     acq.set_xlim((0, 1))
    # acq.set_ylim((0, np.max(utility) + 0.5))
    acq.set_ylabel('Utility', fontdict={'size': 20})
    acq.set_xlabel('x', fontdict={'size': 20})

    # Legends placed outside the axes on the right.
    axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)

    fig.savefig(logpath+'/fig_{}'.format(i))
|
normal
|
{
"blob_id": "6defbe25fc17e53df2fc4d32886bba1cb141bdfd",
"index": 7018,
"step-1": "<mask token>\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\n<mask token>\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n 
acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-2": "<mask token>\n\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x + y) ** 2 + 6\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\n<mask token>\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n 
sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-3": "<mask token>\n\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x + y) ** 2 + 6\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\ndef posterior(optimizer, x_obs, y_obs, grid):\n optimizer._gp.fit(x_obs, y_obs)\n mu, sigma = optimizer._gp.predict(grid, return_std=True)\n return mu, sigma\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = 
y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom sklearn.preprocessing import normalize\n\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x + y) ** 2 + 6\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.6, size=1)[0]\n return noise\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input('PRESS ENTER TO CONTINUE.')\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\ndef posterior(optimizer, x_obs, y_obs, grid):\n optimizer._gp.fit(x_obs, y_obs)\n mu, sigma = optimizer._gp.predict(grid, return_std=True)\n return mu, sigma\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle('Gaussian Process and Utility Function After {} Steps'.\n format(steps), fontdict={'size': 30})\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n x_obs = np.array([[res['params']['x']] for res in optimizer.res])\n y_obs = 
np.array([res['target'] for res in optimizer.res])\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n mu = mu * norm\n sigma = sigma * norm\n y_obs = y_obs * norm\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=\n u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu - 1.96 *\n sigma, (mu + 1.96 * sigma)[::-1]]), alpha=0.6, fc='c', ec='None',\n label='95% confidence interval')\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor=\n 'k', markeredgewidth=1)\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.0)\n fig.savefig(logpath + '/fig_{}'.format(i))\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom sklearn.preprocessing import normalize\n\ndef blackbox_function(x, y=None, sim=False):\n if sim:\n if y is None:\n return -x ** 2 + 6\n else:\n return -(x+y) ** 2 + 6\n\n # Reading the magnitude of the N170 data\n filename = 'Output.txt'\n lines = open(filename).read().splitlines()\n\n try:\n latency = float(lines[-1])\n except ValueError:\n print('Failed to convert value to float')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n except IndexError:\n print('The latent file is empty')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n latency = float(lines[-1])\n return latency\n\n\ndef obtain_confidence(sim=False):\n if sim:\n noise = np.random.normal(0, 0.60, size=1)[0]\n return noise\n\n # Reading the Confidence levels of the target value\n filename = 'Confidence.txt'\n lines = open(filename).read().splitlines()\n\n try:\n confidence = float(lines[-1])\n except ValueError:\n print('Failed to convert confidence value to float')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n except IndexError:\n print('The confidence file is empty')\n wait = input(\"PRESS ENTER TO CONTINUE.\")\n lines = open(filename).read().splitlines()\n confidence = float(lines[-1])\n return confidence\n\n\n\ndef posterior(optimizer, x_obs, y_obs, grid):\n\n optimizer._gp.fit(x_obs, y_obs)\n\n mu, sigma = optimizer._gp.predict(grid, return_std=True)\n return mu, sigma\n\n\ndef plot_gp(optimizer, logpath, i, utility_function, bounds, x, y=None):\n\n fig = plt.figure(figsize=(16, 10))\n steps = len(optimizer.res)\n fig.suptitle(\n 'Gaussian Process and Utility Function After {} Steps'.format(steps),\n fontdict={'size': 30}\n )\n\n gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n axis = plt.subplot(gs[0])\n 
acq = plt.subplot(gs[1])\n\n # x_obs = np.array([[res[\"params\"][\"x\"]] for res in optimizer.res])\n # y_obs = np.array([res[\"target\"] for res in optimizer.res])\n\n x_obs = np.array([[res[\"params\"][\"x\"]] for res in optimizer.res])\n y_obs = np.array([res[\"target\"] for res in optimizer.res])\n\n y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)\n y_obs = y_obs.flatten()\n\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n utility = utility_function.utility(x, optimizer._gp, y_obs.max())\n\n # Unnormalize data\n mu = mu*norm\n sigma = sigma*norm\n y_obs = y_obs*norm\n\n if y is not None:\n axis.plot(x, y, linewidth=3, label='Target')\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n\n axis.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),\n alpha=.6, fc='c', ec='None', label='95% confidence interval')\n # if(bounds == \"large\"):\n # axis.set_xlim((-1, 1))\n # else:\n # axis.set_xlim((0, 1))\n # axis.set_ylim((None, None))\n axis.set_ylabel('f(x)', fontdict={'size': 20})\n axis.set_xlabel('x', fontdict={'size': 20})\n\n # utility = utility_function.utility(x, optimizer._gp, 0)\n acq.plot(x, utility, label='Utility Function', color='purple')\n acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,\n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)\n\n # if (bounds == \"large\"):\n # acq.set_xlim((-1, 1))\n # else:\n # acq.set_xlim((0, 1))\n # acq.set_ylim((0, np.max(utility) + 0.5))\n acq.set_ylabel('Utility', fontdict={'size': 20})\n acq.set_xlabel('x', fontdict={'size': 20})\n\n axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)\n acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)\n\n fig.savefig(logpath+'/fig_{}'.format(i))\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tinymce.models
class Migration(migrations.Migration):
    """Add the HeriInfo model and update the choices on Books.type_id."""

    # Applies on top of the previous auto-generated migration of the
    # 'book' app.
    dependencies = [
        ('book', '0002_auto_20180402_2344'),
    ]

    operations = [
        # New table holding a single rich-text (TinyMCE HTML) field.
        migrations.CreateModel(
            name='HeriInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('hcontent', tinymce.models.HTMLField()),
            ],
        ),
        # Re-declare type_id ("product category") with the current set of
        # category choices; stored value remains a small integer.
        migrations.AlterField(
            model_name='books',
            name='type_id',
            field=models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[('ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE', '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'), ('PYTHON', 'python')]),
        ),
    ]
|
normal
|
{
"blob_id": "2c4fe8015968b8a78c7b2ea33ac5e21e01c82e6e",
"index": 2818,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('book', '0002_auto_20180402_2344')]\n operations = [migrations.CreateModel(name='HeriInfo', fields=[('id',\n models.AutoField(verbose_name='ID', primary_key=True, auto_created=\n True, serialize=False)), ('hcontent', tinymce.models.HTMLField())]),\n migrations.AlterField(model_name='books', name='type_id', field=\n models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[(\n 'ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE',\n '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'),\n ('PYTHON', 'python')]))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\nimport tinymce.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('book', '0002_auto_20180402_2344')]\n operations = [migrations.CreateModel(name='HeriInfo', fields=[('id',\n models.AutoField(verbose_name='ID', primary_key=True, auto_created=\n True, serialize=False)), ('hcontent', tinymce.models.HTMLField())]),\n migrations.AlterField(model_name='books', name='type_id', field=\n models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[(\n 'ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE',\n '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'),\n ('PYTHON', 'python')]))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport tinymce.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('book', '0002_auto_20180402_2344'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HeriInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('hcontent', tinymce.models.HTMLField()),\n ],\n ),\n migrations.AlterField(\n model_name='books',\n name='type_id',\n field=models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[('ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE', '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'), ('PYTHON', 'python')]),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
websocket_urlpatterns = [re_path('ws/chat/(?P<room_id>\\w+)/$',
ChatConsumer), re_path('ws/lobby/$', ChatLobbyConsumer)]
<|reserved_special_token_1|>
from django.urls import re_path
from .consumers import ChatConsumer, ChatLobbyConsumer
websocket_urlpatterns = [re_path('ws/chat/(?P<room_id>\\w+)/$',
ChatConsumer), re_path('ws/lobby/$', ChatLobbyConsumer)]
<|reserved_special_token_1|>
from django.urls import re_path
from .consumers import ChatConsumer, ChatLobbyConsumer
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_id>\w+)/$', ChatConsumer),
re_path(r'ws/lobby/$', ChatLobbyConsumer),
]
|
flexible
|
{
"blob_id": "1bd1769f94b93e0bb674adfd1bb96c778708f6d8",
"index": 5593,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwebsocket_urlpatterns = [re_path('ws/chat/(?P<room_id>\\\\w+)/$',\n ChatConsumer), re_path('ws/lobby/$', ChatLobbyConsumer)]\n",
"step-3": "from django.urls import re_path\nfrom .consumers import ChatConsumer, ChatLobbyConsumer\nwebsocket_urlpatterns = [re_path('ws/chat/(?P<room_id>\\\\w+)/$',\n ChatConsumer), re_path('ws/lobby/$', ChatLobbyConsumer)]\n",
"step-4": "from django.urls import re_path\n\nfrom .consumers import ChatConsumer, ChatLobbyConsumer\n\nwebsocket_urlpatterns = [\n re_path(r'ws/chat/(?P<room_id>\\w+)/$', ChatConsumer),\n re_path(r'ws/lobby/$', ChatLobbyConsumer),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from features.steps.web.test_home_page import *
from features.steps.mobile.test_home_page import *
from features.steps.web.test_login_page import *
|
flexible
|
{
"blob_id": "b09d0806dfc6f4badfd9f2ac9c3f6d17d3df8e8c",
"index": 3254,
"step-1": "<mask token>\n",
"step-2": "from features.steps.web.test_home_page import *\nfrom features.steps.mobile.test_home_page import *\nfrom features.steps.web.test_login_page import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""deserialization tools"""
import typing as t
from datetime import datetime
from functools import partial
from toolz import compose, flip, valmap
from valuable import load, xml
from . import types
registry = load.PrimitiveRegistry({
bool: dict(true=True, false=False).__getitem__,
datetime: partial(flip(datetime.strptime), '%Y-%m-%dT%H:%M:%S%z'),
str: str.strip,
**{
c: c for c in [
int,
float,
types.Journey.Status,
types.Journey.Component.Status
]
}
}) | load.GenericRegistry({
t.List: load.list_loader,
}) | load.get_optional_loader | load.DataclassRegistry({
types.Station: {**valmap(xml.textgetter, {
'code': 'Code',
'type': 'Type',
'country': 'Land',
'uic': 'UICCode',
'lat': 'Lat',
'lon': 'Lon',
'name': 'Namen/Middel',
'full_name': 'Namen/Lang',
'short_name': 'Namen/Kort',
}), **{
'synonyms': xml.textsgetter('Synoniemen/Synoniem'),
}},
types.Journey: {**valmap(xml.textgetter, {
'transfer_count': 'AantalOverstappen',
'planned_duration': 'GeplandeReisTijd',
'planned_departure': 'GeplandeVertrekTijd',
'planned_arrival': 'GeplandeAankomstTijd',
'actual_duration': 'ActueleReisTijd',
'actual_departure': 'ActueleVertrekTijd',
'actual_arrival': 'ActueleAankomstTijd',
'status': 'Status',
}), **{
'components': xml.elemsgetter('ReisDeel'),
'notifications': xml.elemsgetter('Melding'),
}, **{
'optimal': xml.textgetter('Optimaal', default='false')
}},
types.Departure: {**valmap(xml.textgetter, {
'ride_number': 'RitNummer',
'time': 'VertrekTijd',
'destination': 'EindBestemming',
'train_type': 'TreinSoort',
'carrier': 'Vervoerder',
'platform': 'VertrekSpoor',
}), **{
'platform_changed': xml.attribgetter('VertrekSpoor', 'wijziging'),
'comments': xml.textsgetter('Opmerkingen/Opmerking'),
'delay': xml.textgetter('VertrekVertragingTekst',
default=None),
'travel_tip': xml.textgetter('ReisTip', default=None),
'route_text': xml.textgetter('RouteTekst', default=None),
}},
types.Journey.Component: {**valmap(xml.textgetter, {
'carrier': 'Vervoerder',
'type': 'VervoerType',
'ride_number': 'RitNummer',
'status': 'Status',
}), **{
'details': xml.textsgetter('Reisdetails/Reisdetail'),
'kind': xml.attribgetter('.', 'reisSoort'),
'stops': xml.elemsgetter('ReisStop'),
}},
types.Journey.Component.Stop: {
'name': xml.textgetter('Naam'),
'time': compose(lambda x: x or None,
xml.textgetter('Tijd')),
'platform_changed': xml.attribgetter('Spoor', 'wijziging',
default=None),
'delay': xml.textgetter('VertrekVertraging', default=None),
'platform': xml.textgetter('Spoor', default=None)
},
types.Journey.Notification: valmap(xml.textgetter, {
'id': 'Id',
'serious': 'Ernstig',
'text': 'Text',
})
})
|
normal
|
{
"blob_id": "2dcb2d8d41096f0affe569d8ddbdd190885d5f14",
"index": 4738,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregistry = load.PrimitiveRegistry({bool: dict(true=True, false=False).\n __getitem__, datetime: partial(flip(datetime.strptime),\n '%Y-%m-%dT%H:%M:%S%z'), str: str.strip, **{c: c for c in [int, float,\n types.Journey.Status, types.Journey.Component.Status]}}\n ) | load.GenericRegistry({t.List: load.list_loader}\n ) | load.get_optional_loader | load.DataclassRegistry({types.Station: {\n **valmap(xml.textgetter, {'code': 'Code', 'type': 'Type', 'country':\n 'Land', 'uic': 'UICCode', 'lat': 'Lat', 'lon': 'Lon', 'name':\n 'Namen/Middel', 'full_name': 'Namen/Lang', 'short_name': 'Namen/Kort'}),\n **{'synonyms': xml.textsgetter('Synoniemen/Synoniem')}}, types.Journey:\n {**valmap(xml.textgetter, {'transfer_count': 'AantalOverstappen',\n 'planned_duration': 'GeplandeReisTijd', 'planned_departure':\n 'GeplandeVertrekTijd', 'planned_arrival': 'GeplandeAankomstTijd',\n 'actual_duration': 'ActueleReisTijd', 'actual_departure':\n 'ActueleVertrekTijd', 'actual_arrival': 'ActueleAankomstTijd', 'status':\n 'Status'}), **{'components': xml.elemsgetter('ReisDeel'),\n 'notifications': xml.elemsgetter('Melding')}, **{'optimal': xml.\n textgetter('Optimaal', default='false')}}, types.Departure: {**valmap(\n xml.textgetter, {'ride_number': 'RitNummer', 'time': 'VertrekTijd',\n 'destination': 'EindBestemming', 'train_type': 'TreinSoort', 'carrier':\n 'Vervoerder', 'platform': 'VertrekSpoor'}), **{'platform_changed': xml.\n attribgetter('VertrekSpoor', 'wijziging'), 'comments': xml.textsgetter(\n 'Opmerkingen/Opmerking'), 'delay': xml.textgetter(\n 'VertrekVertragingTekst', default=None), 'travel_tip': xml.textgetter(\n 'ReisTip', default=None), 'route_text': xml.textgetter('RouteTekst',\n default=None)}}, types.Journey.Component: {**valmap(xml.textgetter, {\n 'carrier': 'Vervoerder', 'type': 'VervoerType', 'ride_number':\n 'RitNummer', 'status': 'Status'}), **{'details': xml.textsgetter(\n 'Reisdetails/Reisdetail'), 'kind': xml.attribgetter('.', 'reisSoort'),\n 
'stops': xml.elemsgetter('ReisStop')}}, types.Journey.Component.Stop: {\n 'name': xml.textgetter('Naam'), 'time': compose(lambda x: x or None,\n xml.textgetter('Tijd')), 'platform_changed': xml.attribgetter('Spoor',\n 'wijziging', default=None), 'delay': xml.textgetter('VertrekVertraging',\n default=None), 'platform': xml.textgetter('Spoor', default=None)},\n types.Journey.Notification: valmap(xml.textgetter, {'id': 'Id',\n 'serious': 'Ernstig', 'text': 'Text'})})\n",
"step-3": "<mask token>\nimport typing as t\nfrom datetime import datetime\nfrom functools import partial\nfrom toolz import compose, flip, valmap\nfrom valuable import load, xml\nfrom . import types\nregistry = load.PrimitiveRegistry({bool: dict(true=True, false=False).\n __getitem__, datetime: partial(flip(datetime.strptime),\n '%Y-%m-%dT%H:%M:%S%z'), str: str.strip, **{c: c for c in [int, float,\n types.Journey.Status, types.Journey.Component.Status]}}\n ) | load.GenericRegistry({t.List: load.list_loader}\n ) | load.get_optional_loader | load.DataclassRegistry({types.Station: {\n **valmap(xml.textgetter, {'code': 'Code', 'type': 'Type', 'country':\n 'Land', 'uic': 'UICCode', 'lat': 'Lat', 'lon': 'Lon', 'name':\n 'Namen/Middel', 'full_name': 'Namen/Lang', 'short_name': 'Namen/Kort'}),\n **{'synonyms': xml.textsgetter('Synoniemen/Synoniem')}}, types.Journey:\n {**valmap(xml.textgetter, {'transfer_count': 'AantalOverstappen',\n 'planned_duration': 'GeplandeReisTijd', 'planned_departure':\n 'GeplandeVertrekTijd', 'planned_arrival': 'GeplandeAankomstTijd',\n 'actual_duration': 'ActueleReisTijd', 'actual_departure':\n 'ActueleVertrekTijd', 'actual_arrival': 'ActueleAankomstTijd', 'status':\n 'Status'}), **{'components': xml.elemsgetter('ReisDeel'),\n 'notifications': xml.elemsgetter('Melding')}, **{'optimal': xml.\n textgetter('Optimaal', default='false')}}, types.Departure: {**valmap(\n xml.textgetter, {'ride_number': 'RitNummer', 'time': 'VertrekTijd',\n 'destination': 'EindBestemming', 'train_type': 'TreinSoort', 'carrier':\n 'Vervoerder', 'platform': 'VertrekSpoor'}), **{'platform_changed': xml.\n attribgetter('VertrekSpoor', 'wijziging'), 'comments': xml.textsgetter(\n 'Opmerkingen/Opmerking'), 'delay': xml.textgetter(\n 'VertrekVertragingTekst', default=None), 'travel_tip': xml.textgetter(\n 'ReisTip', default=None), 'route_text': xml.textgetter('RouteTekst',\n default=None)}}, types.Journey.Component: {**valmap(xml.textgetter, {\n 'carrier': 'Vervoerder', 
'type': 'VervoerType', 'ride_number':\n 'RitNummer', 'status': 'Status'}), **{'details': xml.textsgetter(\n 'Reisdetails/Reisdetail'), 'kind': xml.attribgetter('.', 'reisSoort'),\n 'stops': xml.elemsgetter('ReisStop')}}, types.Journey.Component.Stop: {\n 'name': xml.textgetter('Naam'), 'time': compose(lambda x: x or None,\n xml.textgetter('Tijd')), 'platform_changed': xml.attribgetter('Spoor',\n 'wijziging', default=None), 'delay': xml.textgetter('VertrekVertraging',\n default=None), 'platform': xml.textgetter('Spoor', default=None)},\n types.Journey.Notification: valmap(xml.textgetter, {'id': 'Id',\n 'serious': 'Ernstig', 'text': 'Text'})})\n",
"step-4": "\"\"\"deserialization tools\"\"\"\nimport typing as t\nfrom datetime import datetime\nfrom functools import partial\n\nfrom toolz import compose, flip, valmap\nfrom valuable import load, xml\n\nfrom . import types\n\nregistry = load.PrimitiveRegistry({\n bool: dict(true=True, false=False).__getitem__,\n datetime: partial(flip(datetime.strptime), '%Y-%m-%dT%H:%M:%S%z'),\n str: str.strip,\n **{\n c: c for c in [\n int,\n float,\n types.Journey.Status,\n types.Journey.Component.Status\n ]\n }\n}) | load.GenericRegistry({\n t.List: load.list_loader,\n}) | load.get_optional_loader | load.DataclassRegistry({\n types.Station: {**valmap(xml.textgetter, {\n 'code': 'Code',\n 'type': 'Type',\n 'country': 'Land',\n 'uic': 'UICCode',\n 'lat': 'Lat',\n 'lon': 'Lon',\n 'name': 'Namen/Middel',\n 'full_name': 'Namen/Lang',\n 'short_name': 'Namen/Kort',\n }), **{\n 'synonyms': xml.textsgetter('Synoniemen/Synoniem'),\n }},\n types.Journey: {**valmap(xml.textgetter, {\n 'transfer_count': 'AantalOverstappen',\n 'planned_duration': 'GeplandeReisTijd',\n 'planned_departure': 'GeplandeVertrekTijd',\n 'planned_arrival': 'GeplandeAankomstTijd',\n 'actual_duration': 'ActueleReisTijd',\n 'actual_departure': 'ActueleVertrekTijd',\n 'actual_arrival': 'ActueleAankomstTijd',\n 'status': 'Status',\n }), **{\n 'components': xml.elemsgetter('ReisDeel'),\n 'notifications': xml.elemsgetter('Melding'),\n }, **{\n 'optimal': xml.textgetter('Optimaal', default='false')\n }},\n types.Departure: {**valmap(xml.textgetter, {\n 'ride_number': 'RitNummer',\n 'time': 'VertrekTijd',\n 'destination': 'EindBestemming',\n 'train_type': 'TreinSoort',\n 'carrier': 'Vervoerder',\n 'platform': 'VertrekSpoor',\n }), **{\n 'platform_changed': xml.attribgetter('VertrekSpoor', 'wijziging'),\n 'comments': xml.textsgetter('Opmerkingen/Opmerking'),\n 'delay': xml.textgetter('VertrekVertragingTekst',\n default=None),\n 'travel_tip': xml.textgetter('ReisTip', default=None),\n 'route_text': 
xml.textgetter('RouteTekst', default=None),\n }},\n types.Journey.Component: {**valmap(xml.textgetter, {\n 'carrier': 'Vervoerder',\n 'type': 'VervoerType',\n 'ride_number': 'RitNummer',\n 'status': 'Status',\n }), **{\n 'details': xml.textsgetter('Reisdetails/Reisdetail'),\n 'kind': xml.attribgetter('.', 'reisSoort'),\n 'stops': xml.elemsgetter('ReisStop'),\n }},\n types.Journey.Component.Stop: {\n 'name': xml.textgetter('Naam'),\n 'time': compose(lambda x: x or None,\n xml.textgetter('Tijd')),\n 'platform_changed': xml.attribgetter('Spoor', 'wijziging',\n default=None),\n 'delay': xml.textgetter('VertrekVertraging', default=None),\n 'platform': xml.textgetter('Spoor', default=None)\n },\n types.Journey.Notification: valmap(xml.textgetter, {\n 'id': 'Id',\n 'serious': 'Ernstig',\n 'text': 'Text',\n })\n})\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__.extend(colorbar_artist.__all__)
__all__.extend(subplot_artist.__all__)
__all__.extend(surface_3d_with_shadows.__all__)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['colorbar_artist', 'subplot_artist', 'surface_3d_with_shadows']
__all__.extend(colorbar_artist.__all__)
__all__.extend(subplot_artist.__all__)
__all__.extend(surface_3d_with_shadows.__all__)
<|reserved_special_token_1|>
from . import colorbar_artist
from . import subplot_artist
from . import surface_3d_with_shadows
from .colorbar_artist import *
from .subplot_artist import *
from .surface_3d_with_shadows import *
__all__ = ['colorbar_artist', 'subplot_artist', 'surface_3d_with_shadows']
__all__.extend(colorbar_artist.__all__)
__all__.extend(subplot_artist.__all__)
__all__.extend(surface_3d_with_shadows.__all__)
|
flexible
|
{
"blob_id": "16c4dbd472f9d32e5fa48a28dff4a40914f7d29e",
"index": 8231,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__.extend(colorbar_artist.__all__)\n__all__.extend(subplot_artist.__all__)\n__all__.extend(surface_3d_with_shadows.__all__)\n",
"step-3": "<mask token>\n__all__ = ['colorbar_artist', 'subplot_artist', 'surface_3d_with_shadows']\n__all__.extend(colorbar_artist.__all__)\n__all__.extend(subplot_artist.__all__)\n__all__.extend(surface_3d_with_shadows.__all__)\n",
"step-4": "from . import colorbar_artist\nfrom . import subplot_artist\nfrom . import surface_3d_with_shadows\nfrom .colorbar_artist import *\nfrom .subplot_artist import *\nfrom .surface_3d_with_shadows import *\n__all__ = ['colorbar_artist', 'subplot_artist', 'surface_3d_with_shadows']\n__all__.extend(colorbar_artist.__all__)\n__all__.extend(subplot_artist.__all__)\n__all__.extend(surface_3d_with_shadows.__all__)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
from moviepy.editor import VideoFileClip
output_images_dir = './output_images/'
test_images_dir = './test_images/'
output_video_file = 'output.mp4'
mtx = None
dist = None
def load_image(filename):
return mpimg.imread(filename)
def calibrate_camera(rows=6, cols=9):
mtx = None
dist = None
save_file = 'calibration.npz'
try:
data = np.load(save_file)
mtx = data['mtx']
dist = data['dist']
print('using saved calibration')
except FileNotFoundError:
print('begin calibration')
filenames = glob('camera_cal/*.jpg')
objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane
#Prepare object points, like (0,0,0), (1,0,0)...
objp = np.zeros((rows*cols,3), np.float32)
objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates
for f in filenames:
img = load_image(f)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)
if ret:
imgpoints.append(corners)
objpoints.append(objp)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
if ret:
for f in filenames:
img = load_image(f)
undist = cv2.undistort(img, mtx, dist, None, mtx)
save_output_image(undist, 'undistorted-' + f.split('/')[-1])
print('end calibration')
np.savez(save_file, mtx=mtx, dist=dist)
return mtx, dist
def save_output_image(img, filename, cmap=None):
mpimg.imsave(output_images_dir + filename, img, cmap=cmap)
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return grad_binary
def color_threshold(img):
#R = img[:,:,0]
#G = img[:,:,1]
#B = img[:,:,2]
#binary = np.zeros_like(R)
#binary[(R > 200) & (G > 160) & ((B < 100) | (B > 200))] = 1
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
binary = np.zeros_like(H)
binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1
return binary
def window_mask(width, height, img_ref, center,level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1
return output
def find_lr_window_centroids(image, window_width, window_height, margin):
#window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(window_width) # Create our window template that we will use for convolutions
left_centroids = []
right_centroids = []
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)
l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)
r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)
y_base = int(image.shape[0] - window_height/2)
# Add what we found for the first layer
y_center = y_base
left_centroids.append((l_center, y_center))
right_centroids.append((r_center, y_center))
# Go through each layer looking for max pixel locations
for level in range(1,(int)(image.shape[0]/window_height)):
y_center = int(y_base - (level * window_height))
# convolve the window into the vertical slice of the image
image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
offset = window_width/2
l_min_index = int(max(l_center+offset-margin,0))
l_max_index = int(min(l_center+offset+margin,image.shape[1]))
l_max = np.argmax(conv_signal[l_min_index:l_max_index])
if l_max > 50:
left_centroids.append((l_center, y_center))
l_center = l_max+l_min_index-offset
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center+offset-margin,0))
r_max_index = int(min(r_center+offset+margin,image.shape[1]))
r_max = np.argmax(conv_signal[r_min_index:r_max_index])
if r_max > 50:
right_centroids.append((r_center, y_center))
r_center = r_max+r_min_index-offset
return left_centroids, right_centroids
def draw_window_boxes(img, l_points, r_points, window_width, window_height):
if len(l_points) > 0:
for p in l_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255,0,0), -1)
if len(r_points) > 0:
for p in r_points:
cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0,255,0), -1)
return img
def draw_window_centroids(warped, window_centroids, window_width = 50, window_height = 80):
if len(window_centroids) > 0:
# Points used to draw all the left and right windows
l_points = np.zeros_like(warped)
r_points = np.zeros_like(warped)
# Go through each level and draw the windows
for level in range(0,len(window_centroids)):
# Window_mask is a function to draw window areas
l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)
# Add graphic points from window mask here to total pixels found
l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255
# Draw the results
#template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
zero_channel = np.zeros_like(l_points) # create a zero color channle
template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green
warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results
# If no window centers found, just display orginal road image
else:
output = np.array(cv2.merge((warped,warped,warped)),np.uint8)
return output
def draw_text(img, text, origin):
cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)
def pipeline_image(img, save_images=None, save_suffix='.jpg'):
if save_images:
print('begin pipeline_image', save_suffix)
undistorted = undistort(img)
if save_images:
save_output_image(undistorted, 'undistorted' + save_suffix)
#binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))
binary = color_threshold(undistorted)
if save_images:
save_output_image(binary, 'binary' + save_suffix, cmap='gray')
img_size = binary.shape[::-1]
src = np.float32(
[[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
[((img_size[0] / 6) - 10), img_size[1]],
[(img_size[0] * 5 / 6) + 60, img_size[1]],
[(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
dst = np.float32(
[[(img_size[0] / 4), 0],
[(img_size[0] / 4), img_size[1]],
[(img_size[0] * 3 / 4), img_size[1]],
[(img_size[0] * 3 / 4), 0]])
if save_images:
cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)
save_output_image(img, 'polygon' + save_suffix)
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)
if save_images:
save_output_image(warped, 'warped' + save_suffix, cmap='gray')
window_width = 40
window_height = 60
#identified lane-line pixels and fit their positions with a polynomial
l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)
global last_l_points, last_r_points
if len(l_points) < 5 and len(last_l_points) > 0:
#print("less than 4 l_points:", len(r_points))
# use the previous points
l_points = last_l_points
else:
last_l_points = l_points
l_points = np.array(l_points, dtype=np.int32)
l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)
if len(r_points) < 5 and len(last_r_points) > 0:
#print("less than 4 r_points:", len(r_points))
r_points = last_r_points
else:
last_r_points = r_points
r_points = np.array(r_points, dtype=np.int32)
r_poly = np.polyfit(r_points[:,1], r_points[:,0], 2)
yval = np.arange(0, warped.shape[0])
l_xval = np.polyval(l_poly, yval)
r_xval = np.polyval(r_poly, yval)
if save_images:
lanes = warped*255
lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green
lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)
for p in l_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for p in r_points:
cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
for x,y in zip(l_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)
for x,y in zip(r_xval, yval):
cv2.circle(lanes, (int(x),y), 5, (0,255,255), -1)
save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
#calculated the position of the vehicle with respect to center
lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix
direction = 'Left'
if lane_center_offset_m > 0:
direction = 'Right'
#calculated the radius of curvature of the lane
y_eval = np.max(yval)
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)
right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
#Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)
draw_text(undistorted, "Radius: {:.1f}m {:.1f}m".format(left_curverad, right_curverad), (50, 50))
draw_text(undistorted, "{:.3f}m {} of Center".format(abs(lane_center_offset_m), direction), (50, 100))
output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)
if save_images:
save_output_image(output, 'output' + save_suffix)
return output
def process_test_images():
filenames = glob('test_images/*.jpg')
#filenames = ['test_images/test2.jpg']
for f in filenames:
img = load_image(f)
img_out = pipeline_image(img, True, '-' + f.split('/')[-1])
#show_before_after(img, img_out, 'gray')
def process_video(in_file, out_file):
clip = VideoFileClip(in_file)
video_clip = clip.fl_image(pipeline_image)
video_clip.write_videofile(out_file, audio=False)
def show_before_after(before, after, cmap=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
ax1.imshow(before)
ax1.set_title('Before')
ax2.imshow(after, cmap=cmap)
ax2.set_title('After')
plt.show()
def show_images(imgs, titles, rows=3, cols=6):
    """Show a titled grid of images.

    Parameters
    ----------
    imgs : sequence of images displayable by ``Axes.imshow``
    titles : sequence of titles, paired positionally with *imgs*
    rows, cols : int, optional
        Grid shape. Defaults (3, 6) preserve the original fixed layout.

    Only the first ``rows * cols`` images are shown; extra axes beyond
    ``len(imgs)`` are left empty (``zip`` stops at the shorter input).
    """
    fig, axes = plt.subplots(rows, cols, figsize=(12, 6))
    fig.subplots_adjust(hspace=0.5, wspace=0.5)
    for ax, img, title in zip(axes.flat, imgs, titles):
        ax.imshow(img)
        ax.set_title(title)
    plt.show()
# Rolling per-frame lane state: pipeline_image reads/writes these globals,
# falling back to the previous frame's centroids when a frame yields too
# few window detections. Must be initialized before any frame is processed.
last_l_points = []
last_r_points = []
# Camera calibration matrix/distortion coefficients; undistort() reads
# these module globals, so calibration must run before the pipeline.
mtx, dist = calibrate_camera()
# Process the still test images first, then annotate each project video.
process_test_images()
process_video('project_video.mp4', 'output.mp4')
process_video('challenge_video.mp4', 'challenge_output.mp4')
process_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')
|
normal
|
{
"blob_id": "3ac30240577eda08343796abbd051d5d3b45beaf",
"index": 3416,
"step-1": "<mask token>\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = 
np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) & (L > 50) | (L > 220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\n<mask token>\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\n<mask token>\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if 
save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src = np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] / 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y 
in zip(r_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef 
show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = 
np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) & (L > 50) | (L > 220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n window = np.ones(window_width)\n left_centroids = []\n right_centroids = []\n l_sum = np.sum(image[int(3 * image.shape[0] / 4):, :int(image.shape[1] /\n 2)], axis=0)\n l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n r_sum = np.sum(image[int(3 * image.shape[0] / 4):, int(image.shape[1] /\n 2):], axis=0)\n r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(\n image.shape[1] / 2)\n y_base = int(image.shape[0] - window_height / 2)\n y_center = y_base\n left_centroids.append((l_center, y_center))\n right_centroids.append((r_center, y_center))\n for level in range(1, int(image.shape[0] / window_height)):\n y_center = int(y_base - level * window_height)\n image_layer = np.sum(image[int(image.shape[0] - (level + 1) *\n window_height):int(image.shape[0] - level * window_height), :],\n axis=0)\n conv_signal = np.convolve(window, image_layer)\n offset = window_width / 2\n l_min_index = int(max(l_center + offset - margin, 0))\n l_max_index = int(min(l_center + offset + margin, image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max + l_min_index - offset\n r_min_index = int(max(r_center + offset - margin, 0))\n r_max_index = int(min(r_center + offset + margin, image.shape[1]))\n r_max = np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max + r_min_index - offset\n return left_centroids, 
right_centroids\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, \n 255), thickness=2)\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src = np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] 
/ 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction 
= 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = 
plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\n<mask token>\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n",
"step-3": "<mask token>\noutput_images_dir = './output_images/'\ntest_images_dir = './test_images/'\noutput_video_file = 'output.mp4'\nmtx = None\ndist = None\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef 
color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) & (L > 50) | (L > 220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n window = np.ones(window_width)\n left_centroids = []\n right_centroids = []\n l_sum = np.sum(image[int(3 * image.shape[0] / 4):, :int(image.shape[1] /\n 2)], axis=0)\n l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n r_sum = np.sum(image[int(3 * image.shape[0] / 4):, int(image.shape[1] /\n 2):], axis=0)\n r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(\n image.shape[1] / 2)\n y_base = int(image.shape[0] - window_height / 2)\n y_center = y_base\n left_centroids.append((l_center, y_center))\n right_centroids.append((r_center, y_center))\n for level in range(1, int(image.shape[0] / window_height)):\n y_center = int(y_base - level * window_height)\n image_layer = np.sum(image[int(image.shape[0] - (level + 1) *\n window_height):int(image.shape[0] - level * window_height), :],\n axis=0)\n conv_signal = np.convolve(window, image_layer)\n offset = window_width / 2\n l_min_index = int(max(l_center + offset - margin, 0))\n l_max_index = int(min(l_center + offset + margin, image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max + l_min_index - offset\n r_min_index = int(max(r_center + offset - margin, 0))\n r_max_index = int(min(r_center + offset + margin, image.shape[1]))\n r_max = 
np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max + r_min_index - offset\n return left_centroids, right_centroids\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, \n 255), thickness=2)\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src 
= np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] / 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + 
save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n 
fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\nlast_l_points = []\nlast_r_points = []\nmtx, dist = calibrate_camera()\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n",
"step-4": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom glob import glob\nfrom moviepy.editor import VideoFileClip\noutput_images_dir = './output_images/'\ntest_images_dir = './test_images/'\noutput_video_file = 'output.mp4'\nmtx = None\ndist = None\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n 
grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) & (L > 50) | (L > 220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n window = np.ones(window_width)\n left_centroids = []\n right_centroids = []\n l_sum = np.sum(image[int(3 * image.shape[0] / 4):, :int(image.shape[1] /\n 2)], axis=0)\n l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n r_sum = np.sum(image[int(3 * image.shape[0] / 4):, int(image.shape[1] /\n 2):], axis=0)\n r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(\n image.shape[1] / 2)\n y_base = int(image.shape[0] - window_height / 2)\n y_center = y_base\n left_centroids.append((l_center, y_center))\n right_centroids.append((r_center, y_center))\n for level in range(1, int(image.shape[0] / window_height)):\n y_center = int(y_base - level * window_height)\n image_layer = np.sum(image[int(image.shape[0] - (level + 1) *\n window_height):int(image.shape[0] - level * window_height), :],\n axis=0)\n conv_signal = np.convolve(window, image_layer)\n offset = window_width / 2\n l_min_index = int(max(l_center + offset - margin, 0))\n l_max_index = int(min(l_center + offset + margin, image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max + l_min_index - offset\n r_min_index = int(max(r_center + 
offset - margin, 0))\n r_max_index = int(min(r_center + offset + margin, image.shape[1]))\n r_max = np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max + r_min_index - offset\n return left_centroids, right_centroids\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, \n 255), thickness=2)\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if save_images:\n 
save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src = np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] / 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y in zip(r_xval, 
yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef show_before_after(before, 
after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\nlast_l_points = []\nlast_r_points = []\nmtx, dist = calibrate_camera()\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n",
"step-5": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom glob import glob\nfrom moviepy.editor import VideoFileClip\n\noutput_images_dir = './output_images/'\ntest_images_dir = './test_images/'\noutput_video_file = 'output.mp4'\n\nmtx = None\ndist = None\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n\n objpoints = [] # 3D points in real world space\n imgpoints = [] # 2D points in image plane\n\n #Prepare object points, like (0,0,0), (1,0,0)...\n objp = np.zeros((rows*cols,3), np.float32)\n objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates\n\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)\n\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n\n return mtx, dist\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))\n if orient == 'y':\n abs_sobel = 
np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n\n return grad_binary\n\ndef color_threshold(img):\n #R = img[:,:,0]\n #G = img[:,:,1]\n #B = img[:,:,2]\n\n #binary = np.zeros_like(R)\n #binary[(R > 200) & (G > 160) & ((B < 100) | (B > 200))] = 1\n\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:,:,0]\n L = hls[:,:,1]\n S = hls[:,:,2]\n\n binary = np.zeros_like(H)\n binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1\n\n return binary\n\ndef window_mask(width, height, img_ref, center,level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1\n return output\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n #window_centroids = [] # Store the (left,right) window centroid positions per level\n window = np.ones(window_width) # Create our window template that we will use for convolutions\n\n left_centroids = []\n right_centroids = []\n\n # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice\n # and then np.convolve the vertical image slice with the window template \n\n # Sum quarter bottom of image to get slice, could use a different ratio\n l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)\n l_center = np.argmax(np.convolve(window,l_sum))-window_width/2\n r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)\n r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)\n\n y_base = int(image.shape[0] - window_height/2)\n\n # Add what we found for the first layer\n y_center = y_base\n left_centroids.append((l_center, y_center))\n 
right_centroids.append((r_center, y_center))\n\n # Go through each layer looking for max pixel locations\n for level in range(1,(int)(image.shape[0]/window_height)):\n y_center = int(y_base - (level * window_height))\n\n # convolve the window into the vertical slice of the image\n image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)\n conv_signal = np.convolve(window, image_layer)\n # Find the best left centroid by using past left center as a reference\n # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window\n offset = window_width/2\n l_min_index = int(max(l_center+offset-margin,0))\n l_max_index = int(min(l_center+offset+margin,image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max+l_min_index-offset\n # Find the best right centroid by using past right center as a reference\n r_min_index = int(max(r_center+offset-margin,0))\n r_max_index = int(min(r_center+offset+margin,image.shape[1]))\n r_max = np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max+r_min_index-offset\n\n return left_centroids, right_centroids\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255,0,0), -1)\n\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0,255,0), -1)\n\n return img\n\ndef draw_window_centroids(warped, window_centroids, window_width = 50, window_height = 80):\n if len(window_centroids) > 0:\n\n # Points used to draw all the left and right windows\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n\n # Go through each level 
and draw the windows \n for level in range(0,len(window_centroids)):\n # Window_mask is a function to draw window areas\n l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)\n r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)\n # Add graphic points from window mask here to total pixels found \n l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255\n r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255\n\n # Draw the results\n #template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together\n zero_channel = np.zeros_like(l_points) # create a zero color channle \n template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green\n warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results\n\n # If no window centers found, just display orginal road image\n else:\n output = np.array(cv2.merge((warped,warped,warped)),np.uint8)\n\n return output\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n\n #binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))\n binary = color_threshold(undistorted)\n if save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n\n img_size = binary.shape[::-1]\n\n src = np.float32(\n [[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],\n [((img_size[0] / 6) - 10), img_size[1]],\n [(img_size[0] * 5 / 6) + 60, img_size[1]],\n [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])\n 
dst = np.float32(\n [[(img_size[0] / 4), 0],\n [(img_size[0] / 4), img_size[1]],\n [(img_size[0] * 3 / 4), img_size[1]],\n [(img_size[0] * 3 / 4), 0]])\n\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n\n window_width = 40\n window_height = 60\n\n #identified lane-line pixels and fit their positions with a polynomial\n l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n #print(\"less than 4 l_points:\", len(r_points))\n # use the previous points\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)\n\n if len(r_points) < 5 and len(last_r_points) > 0:\n #print(\"less than 4 r_points:\", len(r_points))\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:,1], r_points[:,0], 2)\n\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n\n if save_images:\n lanes = warped*255\n lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)\n\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)\n\n for x,y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)\n for x,y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x),y), 5, (0,255,255), 
-1)\n\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n\n # Define conversions in x and y from pixels space to meters\n ym_per_pix = 30/720 # meters per pixel in y dimension\n xm_per_pix = 3.7/700 # meters per pixel in x dimension\n\n #calculated the position of the vehicle with respect to center\n lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n\n #calculated the radius of curvature of the lane\n y_eval = np.max(yval)\n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])\n right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])\n # Now our radius of curvature is in meters\n\n #Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)\n draw_text(undistorted, \"Radius: {:.1f}m {:.1f}m\".format(left_curverad, right_curverad), (50, 50))\n draw_text(undistorted, \"{:.3f}m {} of Center\".format(abs(lane_center_offset_m), direction), (50, 100))\n output = 
cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n\n return output\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n #filenames = ['test_images/test2.jpg']\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n #show_before_after(img, img_out, 'gray')\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n\n plt.show()\n\n\nlast_l_points = []\nlast_r_points = []\n\nmtx, dist = calibrate_camera()\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n\n",
"step-ids": [
14,
17,
18,
19,
20
]
}
|
[
14,
17,
18,
19,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in stdin:
data = line.strip().split(',')
if last_emp != '' and last_emp != emp_id:
print(
f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'
)
if len(data) == 5:
last_emp = emp_id
emp_id = data[1]
dep_id = data[0]
emp_surname = data[2]
emp_name = data[3]
position = data[4]
else:
dep_name = data[3]
num_of_emp = data[1]
head = data[2]
if last_emp == emp_id == '':
emp_id = 'new'
else:
last_emp = ''
emp_id = 'new'
print(
f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
last_emp = emp_id = ''
for line in stdin:
data = line.strip().split(',')
if last_emp != '' and last_emp != emp_id:
print(
f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'
)
if len(data) == 5:
last_emp = emp_id
emp_id = data[1]
dep_id = data[0]
emp_surname = data[2]
emp_name = data[3]
position = data[4]
else:
dep_name = data[3]
num_of_emp = data[1]
head = data[2]
if last_emp == emp_id == '':
emp_id = 'new'
else:
last_emp = ''
emp_id = 'new'
print(
f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'
)
<|reserved_special_token_1|>
from sys import stdin
last_emp = emp_id = ''
for line in stdin:
data = line.strip().split(',')
if last_emp != '' and last_emp != emp_id:
print(
f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'
)
if len(data) == 5:
last_emp = emp_id
emp_id = data[1]
dep_id = data[0]
emp_surname = data[2]
emp_name = data[3]
position = data[4]
else:
dep_name = data[3]
num_of_emp = data[1]
head = data[2]
if last_emp == emp_id == '':
emp_id = 'new'
else:
last_emp = ''
emp_id = 'new'
print(
f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'
)
<|reserved_special_token_1|>
# Streaming (MapReduce-style) reducer: merges per-employee records with
# their department record arriving on stdin, emitting one combined CSV
# row per employee once both sides of the join have been seen.
# Assumes the mapper grouped lines so that an employee's records arrive
# adjacent to the matching department record — TODO confirm with mapper.
from sys import stdin
last_emp = emp_id = ''
for line in stdin:
    data = line.strip().split(',')
    # A change of employee id means the previously buffered employee is
    # complete — flush it before consuming the current line.
    if last_emp != '' and last_emp != emp_id:
        print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')
    if len(data) == 5:
        # 5-field line = employee record:
        # data[0]=dep_id, data[1]=emp_id, data[2]=surname, data[3]=name, data[4]=position
        last_emp = emp_id
        emp_id = data[1]
        dep_id = data[0]
        emp_surname = data[2]
        emp_name = data[3]
        position = data[4]
    else:
        # Any other width = department record:
        # data[1]=num_of_emp, data[2]=head, data[3]=dep_name
        # (data[0] is presumably the department join key — verify)
        dep_name = data[3]
        num_of_emp = data[1]
        head = data[2]
        if last_emp == emp_id == '':
            # Very first record of the stream: nothing buffered to flush,
            # so only mark the employee slot as consumed.
            # last_emp = ''
            emp_id = 'new'
        else:
            # Department line closes the current employee group; reset the
            # tracking state so the next employee line starts fresh.
            last_emp = ''
            emp_id = 'new'
# End of stream: flush the last buffered employee.
print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')
|
flexible
|
{
"blob_id": "3a2b1ddab422d450ad3b5684cbed1847d31fb8e6",
"index": 2839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in stdin:\n data = line.strip().split(',')\n if last_emp != '' and last_emp != emp_id:\n print(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\nprint(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n",
"step-3": "<mask token>\nlast_emp = emp_id = ''\nfor line in stdin:\n data = line.strip().split(',')\n if last_emp != '' and last_emp != emp_id:\n print(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\nprint(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n",
"step-4": "from sys import stdin\nlast_emp = emp_id = ''\nfor line in stdin:\n data = line.strip().split(',')\n if last_emp != '' and last_emp != emp_id:\n print(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\nprint(\n f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}'\n )\n",
"step-5": "from sys import stdin\n\nlast_emp = emp_id = ''\n\nfor line in stdin:\n data = line.strip().split(',')\n\n if last_emp != '' and last_emp != emp_id:\n print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')\n\n if len(data) == 5:\n last_emp = emp_id\n emp_id = data[1]\n dep_id = data[0]\n emp_surname = data[2]\n emp_name = data[3]\n position = data[4]\n else:\n dep_name = data[3]\n num_of_emp = data[1]\n head = data[2]\n if last_emp == emp_id == '':\n # last_emp = ''\n emp_id = 'new'\n else:\n last_emp = ''\n emp_id = 'new'\n\nprint(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from metricsManager import MetricsManager
def TestDrawGraphs():
manager = MetricsManager()
manager.displayMetricsGraph()
return
def main():
TestDrawGraphs()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from metricsManager import MetricsManager


def TestDrawGraphs():
    """Construct a MetricsManager and render its metrics graph."""
    MetricsManager().displayMetricsGraph()


def main():
    """Script entry point: run the graph-drawing smoke check."""
    TestDrawGraphs()


if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "4e8a5b0ba13921fb88d5d6371d50e7120ab01265",
"index": 737,
"step-1": "<mask token>\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from metricsManager import MetricsManager\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from metricsManager import MetricsManager\n\n\ndef TestDrawGraphs():\n manager = MetricsManager()\n manager.displayMetricsGraph()\n return\n\n\ndef main():\n TestDrawGraphs()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(x)
<|reserved_special_token_0|>
print(x)
<|reserved_special_token_0|>
print(x)
<|reserved_special_token_1|>
x = 2
print(x)
x = 54
print(x)
x = 'Cheese'
print(x)
<|reserved_special_token_1|>
####
# Some more on variables
####

# A name is easily rebound, and the object it refers to may even
# change type between bindings.  Watch the same name hold an int,
# a different int, and then a string:
for x in (2, 54, "Cheese"):
    print(x)

# Run this program to see each value of x printed in turn.
# After the loop, x keeps its last binding ('Cheese').

# Clearly variables can be manipulated easily,
# this can make them very useful
|
flexible
|
{
"blob_id": "dae8529aa58f1451d5acdd6607543c202c3c0c66",
"index": 3810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x)\n<mask token>\nprint(x)\n<mask token>\nprint(x)\n",
"step-3": "x = 2\nprint(x)\nx = 54\nprint(x)\nx = 'Cheese'\nprint(x)\n",
"step-4": "####\n#Some more on variables\n####\n\n#Variables are easily redefined. \n\n#Let's start simple.\n\nx=2 #x is going to start at 2\nprint (x) \nx=54 #we are redefining x to equal 54\nprint (x) \nx= \"Cheese\" #x is now the string 'cheese'\nprint (x)\n#Try running this program to see x \n#printed at each point\n\n#Clearly variables can be manipulated easily,\n#this can make them very useful\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Reddit API feed
import praw
import sys
import os
def main():
    """Prompt for a subreddit and keyword, then dump hot post titles.

    Reads Reddit API credentials from the REDDIT_CLIENT_ID and
    REDDIT_CLIENT_SECRET environment variables; exits with a non-zero
    status when credentials are missing or authentication fails.
    """
    # Check both variables up front: testing only REDDIT_CLIENT_ID let a
    # missing secret raise a KeyError below instead of this message.
    if os.getenv("REDDIT_CLIENT_ID") is None or os.getenv("REDDIT_CLIENT_SECRET") is None:
        print("Set your Reddit environment variables:")
        print("REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET")
        sys.exit(1)
    client_id = os.environ['REDDIT_CLIENT_ID']
    client_secret = os.environ['REDDIT_CLIENT_SECRET']
    try:
        reddit_api = praw.Reddit(client_id=client_id,
                                 client_secret=client_secret,
                                 user_agent="sentiment")
    except Exception as err:  # narrowed from bare except; keep the cause visible
        print("Reddit auth failed: {}".format(err))
        sys.exit(1)
    sub = input("Subreddit: ")
    keyword = input("Keyword: ")
    get_posts(keyword, sub, reddit_api)


# currently only dumps top 10 posts from subreddit
# regardless of keyword
def get_posts(keyword, sub, reddit_api):
    """Print the titles of the 10 hottest posts in *sub*.

    *keyword* is accepted for interface compatibility but is not yet
    used for filtering (see note above).
    """
    for post in reddit_api.subreddit(sub).hot(limit=10):
        print(post.title)


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "9543992e1b115f83640a07c4d4372be0fb465199",
"index": 3256,
"step-1": "# Reddit API feed\n\nimport praw\nimport sys\nimport os\n\ndef main():\n if os.getenv(\"REDDIT_CLIENT_ID\") is None:\n print \"Set your Reddit environment variables:\"\n print \"REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET\"\n sys.exit()\n client_id = os.environ['REDDIT_CLIENT_ID']\n client_secret = os.environ['REDDIT_CLIENT_SECRET']\n try:\n reddit_api = praw.Reddit(client_id = client_id,\n client_secret = client_secret,\n user_agent = \"sentiment\")\n except:\n print \"Reddit auth failed.\"\n sys.exit()\n sub = raw_input(\"Subreddit: \")\n keyword = raw_input(\"Keyword: \")\n get_posts(keyword, sub, reddit_api)\n\n# currently only dumps top 10 posts from subreddit\n# regardless of keyword\ndef get_posts(keyword, sub, reddit_api):\n for post in reddit_api.subreddit(sub).hot(limit=10):\n print post.title\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def get_prediction_data(data, model_path):
x = data.drop(PREDICT_X_SKIP_COLS, axis=1)
y = data[X_COLS]
model = joblib.load(model_path)
y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)
y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)
return y_pred, metrics
def prepare_data(data_folder, model_path):
train, test, na_value = dataset.read_data(data_folder)
x_train = train[X_COLS]
y_train = train[Y_COL]
x_test = test[X_COLS]
y_test = test[Y_COL]
out_train = train[Y_OUTPUT_COLS]
out_test = test[Y_OUTPUT_COLS]
x_pred_train, metrics_train = get_prediction_data(train, model_path)
x_pred_test, metrics_test = get_prediction_data(test, model_path)
train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':
out_train}
test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}
metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}
return train, test, metrics, na_value
def postprocess_data(out_data, y_pred):
y_output = out_data.copy()
y_output[Y_COL] = y_pred
return y_output
def train_evaluate(data_folder, output_folder, model_path):
model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)
print('Preparing data...')
train, test, metrics, na_value = prepare_data(data_folder, model_path)
print('Training...')
model = lstm.train(model, train['x'], train['y'])
model = lstm.train(model, train['x_pred'], train['y'])
print('Evaluating...')
y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],
METRICS_INFO)
y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],
test['y'], METRICS_INFO)
metrics['lstm_pred'] = metrics_lstm
metrics['reg_lstm_pred'] = metrics_reg_lstm
print('Postprocessing data...')
y_output = postprocess_data(test['out'], y_pred)
y_output_reg = postprocess_data(test['out'], y_pred_reg)
output_path = os.path.join(output_folder, 'pred.csv')
y_output.to_csv(output_path, index=False)
output_path = os.path.join(output_folder, 'pred_reg.csv')
y_output_reg.to_csv(output_path, index=False)
result = {'metrics': metrics, 'na_value': na_value}
result_path = os.path.join(output_folder, 'result.json')
json_config = json.dumps(result, indent=4)
with open(result_path, 'w') as result_file:
result_file.write(json_config)
model_path = os.path.join(output_folder, 'lstm.mdl')
torch.save(model, model_path)
print('Output files (model, result, prediction) saved to {}'.format(
output_folder))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, help=
'specifies the data folder path', required=True)
parser.add_argument('--output_path', type=str, help=
'specifies the output folder path', required=True)
parser.add_argument('--regression_model_path', type=str, required=True,
help='specifies the regression model path')
return vars(parser.parse_args())
def main():
args = parse_args()
print('Args: {}'.format(args))
data_path = os.path.abspath(args['data_path'])
output_path = os.path.abspath(args['output_path'])
model_path = os.path.abspath(args['regression_model_path'])
train_evaluate(data_path, output_path, model_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_prediction_data(data, model_path):
x = data.drop(PREDICT_X_SKIP_COLS, axis=1)
y = data[X_COLS]
model = joblib.load(model_path)
y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)
y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)
return y_pred, metrics
def prepare_data(data_folder, model_path):
train, test, na_value = dataset.read_data(data_folder)
x_train = train[X_COLS]
y_train = train[Y_COL]
x_test = test[X_COLS]
y_test = test[Y_COL]
out_train = train[Y_OUTPUT_COLS]
out_test = test[Y_OUTPUT_COLS]
x_pred_train, metrics_train = get_prediction_data(train, model_path)
x_pred_test, metrics_test = get_prediction_data(test, model_path)
train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':
out_train}
test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}
metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}
return train, test, metrics, na_value
def postprocess_data(out_data, y_pred):
y_output = out_data.copy()
y_output[Y_COL] = y_pred
return y_output
def train_evaluate(data_folder, output_folder, model_path):
model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)
print('Preparing data...')
train, test, metrics, na_value = prepare_data(data_folder, model_path)
print('Training...')
model = lstm.train(model, train['x'], train['y'])
model = lstm.train(model, train['x_pred'], train['y'])
print('Evaluating...')
y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],
METRICS_INFO)
y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],
test['y'], METRICS_INFO)
metrics['lstm_pred'] = metrics_lstm
metrics['reg_lstm_pred'] = metrics_reg_lstm
print('Postprocessing data...')
y_output = postprocess_data(test['out'], y_pred)
y_output_reg = postprocess_data(test['out'], y_pred_reg)
output_path = os.path.join(output_folder, 'pred.csv')
y_output.to_csv(output_path, index=False)
output_path = os.path.join(output_folder, 'pred_reg.csv')
y_output_reg.to_csv(output_path, index=False)
result = {'metrics': metrics, 'na_value': na_value}
result_path = os.path.join(output_folder, 'result.json')
json_config = json.dumps(result, indent=4)
with open(result_path, 'w') as result_file:
result_file.write(json_config)
model_path = os.path.join(output_folder, 'lstm.mdl')
torch.save(model, model_path)
print('Output files (model, result, prediction) saved to {}'.format(
output_folder))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, help=
'specifies the data folder path', required=True)
parser.add_argument('--output_path', type=str, help=
'specifies the output folder path', required=True)
parser.add_argument('--regression_model_path', type=str, required=True,
help='specifies the regression model path')
return vars(parser.parse_args())
def main():
args = parse_args()
print('Args: {}'.format(args))
data_path = os.path.abspath(args['data_path'])
output_path = os.path.abspath(args['output_path'])
model_path = os.path.abspath(args['regression_model_path'])
train_evaluate(data_path, output_path, model_path)
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PREDICT_X_SKIP_COLS = ['date', 'weight', 'ts_id', 'resp', 'resp_1',
'resp_2', 'resp_3', 'resp_4']
X_COLS = ['resp_1', 'resp_2', 'resp_3', 'resp_4']
Y_OUTPUT_COLS = ['date', 'ts_id']
Y_COL = ['resp']
METRICS_INFO = ['mse', 'r2', 'mape']
DROPOUT = 0.25
HIDDEN_SIZE = 20
def get_prediction_data(data, model_path):
x = data.drop(PREDICT_X_SKIP_COLS, axis=1)
y = data[X_COLS]
model = joblib.load(model_path)
y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)
y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)
return y_pred, metrics
def prepare_data(data_folder, model_path):
train, test, na_value = dataset.read_data(data_folder)
x_train = train[X_COLS]
y_train = train[Y_COL]
x_test = test[X_COLS]
y_test = test[Y_COL]
out_train = train[Y_OUTPUT_COLS]
out_test = test[Y_OUTPUT_COLS]
x_pred_train, metrics_train = get_prediction_data(train, model_path)
x_pred_test, metrics_test = get_prediction_data(test, model_path)
train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':
out_train}
test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}
metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}
return train, test, metrics, na_value
def postprocess_data(out_data, y_pred):
y_output = out_data.copy()
y_output[Y_COL] = y_pred
return y_output
def train_evaluate(data_folder, output_folder, model_path):
model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)
print('Preparing data...')
train, test, metrics, na_value = prepare_data(data_folder, model_path)
print('Training...')
model = lstm.train(model, train['x'], train['y'])
model = lstm.train(model, train['x_pred'], train['y'])
print('Evaluating...')
y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],
METRICS_INFO)
y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],
test['y'], METRICS_INFO)
metrics['lstm_pred'] = metrics_lstm
metrics['reg_lstm_pred'] = metrics_reg_lstm
print('Postprocessing data...')
y_output = postprocess_data(test['out'], y_pred)
y_output_reg = postprocess_data(test['out'], y_pred_reg)
output_path = os.path.join(output_folder, 'pred.csv')
y_output.to_csv(output_path, index=False)
output_path = os.path.join(output_folder, 'pred_reg.csv')
y_output_reg.to_csv(output_path, index=False)
result = {'metrics': metrics, 'na_value': na_value}
result_path = os.path.join(output_folder, 'result.json')
json_config = json.dumps(result, indent=4)
with open(result_path, 'w') as result_file:
result_file.write(json_config)
model_path = os.path.join(output_folder, 'lstm.mdl')
torch.save(model, model_path)
print('Output files (model, result, prediction) saved to {}'.format(
output_folder))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, help=
'specifies the data folder path', required=True)
parser.add_argument('--output_path', type=str, help=
'specifies the output folder path', required=True)
parser.add_argument('--regression_model_path', type=str, required=True,
help='specifies the regression model path')
return vars(parser.parse_args())
def main():
args = parse_args()
print('Args: {}'.format(args))
data_path = os.path.abspath(args['data_path'])
output_path = os.path.abspath(args['output_path'])
model_path = os.path.abspath(args['regression_model_path'])
train_evaluate(data_path, output_path, model_path)
main()
<|reserved_special_token_1|>
import argparse, os, joblib, json, torch
import pandas as pd
from utils import regression, dataset, lstm
PREDICT_X_SKIP_COLS = ['date', 'weight', 'ts_id', 'resp', 'resp_1',
'resp_2', 'resp_3', 'resp_4']
X_COLS = ['resp_1', 'resp_2', 'resp_3', 'resp_4']
Y_OUTPUT_COLS = ['date', 'ts_id']
Y_COL = ['resp']
METRICS_INFO = ['mse', 'r2', 'mape']
DROPOUT = 0.25
HIDDEN_SIZE = 20
def get_prediction_data(data, model_path):
x = data.drop(PREDICT_X_SKIP_COLS, axis=1)
y = data[X_COLS]
model = joblib.load(model_path)
y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)
y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)
return y_pred, metrics
def prepare_data(data_folder, model_path):
train, test, na_value = dataset.read_data(data_folder)
x_train = train[X_COLS]
y_train = train[Y_COL]
x_test = test[X_COLS]
y_test = test[Y_COL]
out_train = train[Y_OUTPUT_COLS]
out_test = test[Y_OUTPUT_COLS]
x_pred_train, metrics_train = get_prediction_data(train, model_path)
x_pred_test, metrics_test = get_prediction_data(test, model_path)
train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':
out_train}
test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}
metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}
return train, test, metrics, na_value
def postprocess_data(out_data, y_pred):
y_output = out_data.copy()
y_output[Y_COL] = y_pred
return y_output
def train_evaluate(data_folder, output_folder, model_path):
model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)
print('Preparing data...')
train, test, metrics, na_value = prepare_data(data_folder, model_path)
print('Training...')
model = lstm.train(model, train['x'], train['y'])
model = lstm.train(model, train['x_pred'], train['y'])
print('Evaluating...')
y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],
METRICS_INFO)
y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],
test['y'], METRICS_INFO)
metrics['lstm_pred'] = metrics_lstm
metrics['reg_lstm_pred'] = metrics_reg_lstm
print('Postprocessing data...')
y_output = postprocess_data(test['out'], y_pred)
y_output_reg = postprocess_data(test['out'], y_pred_reg)
output_path = os.path.join(output_folder, 'pred.csv')
y_output.to_csv(output_path, index=False)
output_path = os.path.join(output_folder, 'pred_reg.csv')
y_output_reg.to_csv(output_path, index=False)
result = {'metrics': metrics, 'na_value': na_value}
result_path = os.path.join(output_folder, 'result.json')
json_config = json.dumps(result, indent=4)
with open(result_path, 'w') as result_file:
result_file.write(json_config)
model_path = os.path.join(output_folder, 'lstm.mdl')
torch.save(model, model_path)
print('Output files (model, result, prediction) saved to {}'.format(
output_folder))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, help=
'specifies the data folder path', required=True)
parser.add_argument('--output_path', type=str, help=
'specifies the output folder path', required=True)
parser.add_argument('--regression_model_path', type=str, required=True,
help='specifies the regression model path')
return vars(parser.parse_args())
def main():
args = parse_args()
print('Args: {}'.format(args))
data_path = os.path.abspath(args['data_path'])
output_path = os.path.abspath(args['output_path'])
model_path = os.path.abspath(args['regression_model_path'])
train_evaluate(data_path, output_path, model_path)
main()
<|reserved_special_token_1|>
import argparse, os, joblib, json, torch
import pandas as pd
from utils import regression, dataset, lstm
PREDICT_X_SKIP_COLS = ["date", "weight", "ts_id", "resp", "resp_1", "resp_2", "resp_3", "resp_4"]
X_COLS = ["resp_1", "resp_2", "resp_3", "resp_4"]
Y_OUTPUT_COLS = ["date", "ts_id"]
Y_COL = ["resp"]
METRICS_INFO = ["mse", "r2", "mape"]
DROPOUT = 0.25
HIDDEN_SIZE = 20
def get_prediction_data(data, model_path):
x = data.drop(PREDICT_X_SKIP_COLS, axis=1)
y = data[X_COLS]
model = joblib.load(model_path)
(y_pred, metrics) = regression.evaluate(model, x, y, METRICS_INFO)
y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)
return (y_pred, metrics)
def prepare_data(data_folder, model_path):
(train, test, na_value) = dataset.read_data(data_folder)
x_train = train[X_COLS]
y_train = train[Y_COL]
x_test = test[X_COLS]
y_test = test[Y_COL]
out_train = train[Y_OUTPUT_COLS]
out_test = test[Y_OUTPUT_COLS]
(x_pred_train , metrics_train) = get_prediction_data(train, model_path)
(x_pred_test, metrics_test) = get_prediction_data(test, model_path)
train = { "x": x_train, "y": y_train, "x_pred": x_pred_train, "out": out_train}
test = { "x": x_test, "y": y_test, "x_pred": x_pred_test, "out": out_test}
metrics = {
"reg_train_pred": metrics_train,
"reg_test_pred": metrics_test
}
return (train, test, metrics, na_value)
def postprocess_data(out_data, y_pred):
y_output = out_data.copy()
y_output[Y_COL] = y_pred
return y_output
def train_evaluate(data_folder, output_folder, model_path):
model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)
print("Preparing data...")
(train, test, metrics, na_value) = prepare_data(data_folder, model_path)
print("Training...")
model = lstm.train(model, train["x"], train["y"])
model = lstm.train(model, train["x_pred"], train["y"])
print("Evaluating...")
(y_pred, metrics_lstm) = lstm.evaluate(model, test["x"],
test["y"], METRICS_INFO)
(y_pred_reg, metrics_reg_lstm) = lstm.evaluate(model,
test["x_pred"], test["y"], METRICS_INFO)
metrics["lstm_pred"] = metrics_lstm
metrics["reg_lstm_pred"] = metrics_reg_lstm
print("Postprocessing data...")
y_output = postprocess_data(test["out"], y_pred)
y_output_reg = postprocess_data(test["out"], y_pred_reg)
output_path = os.path.join(output_folder, "pred.csv")
y_output.to_csv(output_path, index=False)
output_path = os.path.join(output_folder, "pred_reg.csv")
y_output_reg.to_csv(output_path, index=False)
result = { "metrics": metrics, "na_value": na_value }
result_path = os.path.join(output_folder, "result.json")
json_config = json.dumps(result, indent=4)
with open(result_path, "w") as result_file:
result_file.write(json_config)
model_path = os.path.join(output_folder, "lstm.mdl")
torch.save(model, model_path)
print("Output files (model, result, prediction) saved to {}".format(
output_folder))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path", type=str, help="specifies the data folder path",
required=True)
parser.add_argument(
"--output_path", type=str, help="specifies the output folder path",
required=True)
parser.add_argument(
"--regression_model_path", type=str, required = True,
help="specifies the regression model path")
return vars(parser.parse_args())
def main():
	"""Entry point: resolve CLI paths to absolute form and run the pipeline."""
	cli = parse_args()
	print("Args: {}".format(cli))
	resolved = {name: os.path.abspath(value) for name, value in cli.items()}
	train_evaluate(
		resolved["data_path"], resolved["output_path"],
		resolved["regression_model_path"])
# Guard the entry point so importing this module does not kick off training.
if __name__ == "__main__":
	main()
|
flexible
|
{
"blob_id": "4bdff51a4e277889f4d54d4ace7a0f5384e74f1e",
"index": 9017,
"step-1": "<mask token>\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, 
index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, 
index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\nmain()\n",
"step-3": "<mask token>\nPREDICT_X_SKIP_COLS = ['date', 'weight', 'ts_id', 'resp', 'resp_1',\n 'resp_2', 'resp_3', 'resp_4']\nX_COLS = ['resp_1', 'resp_2', 'resp_3', 'resp_4']\nY_OUTPUT_COLS = ['date', 'ts_id']\nY_COL = ['resp']\nMETRICS_INFO = ['mse', 'r2', 'mape']\nDROPOUT = 0.25\nHIDDEN_SIZE = 20\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = 
metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\nmain()\n",
"step-4": "import argparse, os, joblib, json, torch\nimport pandas as pd\nfrom utils import regression, dataset, lstm\nPREDICT_X_SKIP_COLS = ['date', 'weight', 'ts_id', 'resp', 'resp_1',\n 'resp_2', 'resp_3', 'resp_4']\nX_COLS = ['resp_1', 'resp_2', 'resp_3', 'resp_4']\nY_OUTPUT_COLS = ['date', 'ts_id']\nY_COL = ['resp']\nMETRICS_INFO = ['mse', 'r2', 'mape']\nDROPOUT = 0.25\nHIDDEN_SIZE = 20\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = 
lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\nmain()\n",
"step-5": "import argparse, os, joblib, json, torch\nimport pandas as pd\nfrom utils import regression, dataset, lstm\n\nPREDICT_X_SKIP_COLS = [\"date\", \"weight\", \"ts_id\", \"resp\", \"resp_1\", \"resp_2\", \"resp_3\", \"resp_4\"]\nX_COLS = [\"resp_1\", \"resp_2\", \"resp_3\", \"resp_4\"]\nY_OUTPUT_COLS = [\"date\", \"ts_id\"]\nY_COL = [\"resp\"]\nMETRICS_INFO = [\"mse\", \"r2\", \"mape\"]\nDROPOUT = 0.25\nHIDDEN_SIZE = 20\n\ndef get_prediction_data(data, model_path):\n\tx = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n\ty = data[X_COLS]\n\tmodel = joblib.load(model_path)\n\t(y_pred, metrics) = regression.evaluate(model, x, y, METRICS_INFO)\n\ty_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n\treturn (y_pred, metrics)\n\ndef prepare_data(data_folder, model_path):\n\t(train, test, na_value) = dataset.read_data(data_folder)\n\tx_train = train[X_COLS]\n\ty_train = train[Y_COL]\n\tx_test = test[X_COLS]\n\ty_test = test[Y_COL]\n\tout_train = train[Y_OUTPUT_COLS]\n\tout_test = test[Y_OUTPUT_COLS]\n\t(x_pred_train , metrics_train) = get_prediction_data(train, model_path)\n\t(x_pred_test, metrics_test) = get_prediction_data(test, model_path)\n\ttrain = { \"x\": x_train, \"y\": y_train, \"x_pred\": x_pred_train, \"out\": out_train}\n\ttest = { \"x\": x_test, \"y\": y_test, \"x_pred\": x_pred_test, \"out\": out_test}\n\tmetrics = {\n\t\t\"reg_train_pred\": metrics_train,\n\t\t\"reg_test_pred\": metrics_test\n\t}\n\treturn (train, test, metrics, na_value)\n\ndef postprocess_data(out_data, y_pred):\n\ty_output = out_data.copy()\n\ty_output[Y_COL] = y_pred\n\treturn y_output\n\ndef train_evaluate(data_folder, output_folder, model_path):\n\tmodel = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n\n\tprint(\"Preparing data...\")\n\t(train, test, metrics, na_value) = prepare_data(data_folder, model_path)\n\n\tprint(\"Training...\")\n\tmodel = lstm.train(model, train[\"x\"], train[\"y\"])\n\tmodel = lstm.train(model, train[\"x_pred\"], 
train[\"y\"])\n\n\tprint(\"Evaluating...\")\n\t(y_pred, metrics_lstm) = lstm.evaluate(model, test[\"x\"],\n\t\ttest[\"y\"], METRICS_INFO)\n\t(y_pred_reg, metrics_reg_lstm) = lstm.evaluate(model,\n\t\ttest[\"x_pred\"], test[\"y\"], METRICS_INFO)\n\tmetrics[\"lstm_pred\"] = metrics_lstm\n\tmetrics[\"reg_lstm_pred\"] = metrics_reg_lstm\n\n\tprint(\"Postprocessing data...\")\n\ty_output = postprocess_data(test[\"out\"], y_pred)\n\ty_output_reg = postprocess_data(test[\"out\"], y_pred_reg)\n\n\toutput_path = os.path.join(output_folder, \"pred.csv\")\n\ty_output.to_csv(output_path, index=False)\n\n\toutput_path = os.path.join(output_folder, \"pred_reg.csv\")\n\ty_output_reg.to_csv(output_path, index=False)\n\n\tresult = { \"metrics\": metrics, \"na_value\": na_value }\n\tresult_path = os.path.join(output_folder, \"result.json\")\n\tjson_config = json.dumps(result, indent=4)\n\twith open(result_path, \"w\") as result_file:\n\t\tresult_file.write(json_config)\n\n\tmodel_path = os.path.join(output_folder, \"lstm.mdl\")\n\ttorch.save(model, model_path)\n\tprint(\"Output files (model, result, prediction) saved to {}\".format(\n\t\toutput_folder))\n\ndef parse_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\n\t\t\"--data_path\", type=str, help=\"specifies the data folder path\",\n\t\trequired=True)\n\tparser.add_argument(\n\t\t\"--output_path\", type=str, help=\"specifies the output folder path\",\n\t\trequired=True)\n\tparser.add_argument(\n\t\t\"--regression_model_path\", type=str, required = True,\n\t\thelp=\"specifies the regression model path\")\n\treturn vars(parser.parse_args())\n\ndef main():\n\targs = parse_args()\n\tprint(\"Args: {}\".format(args))\n\tdata_path = os.path.abspath(args[\"data_path\"])\n\toutput_path = os.path.abspath(args[\"output_path\"])\n\tmodel_path = os.path.abspath(args[\"regression_model_path\"])\n\ttrain_evaluate(data_path, output_path, model_path)\n\nmain()\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(round(Qf, 2))
<|reserved_special_token_1|>
# Compound-interest calculation ("juros" appears to be Portuguese for interest).
# Read the per-period interest rate as a decimal fraction (e.g. 0.01 for 1%).
j = float(input('juros'))
Q0 = 1500  # initial principal
t = 36  # number of compounding periods
# Compound growth: final amount = principal * (1 + rate) ** periods.
Qf = Q0 * (1 + j) ** t
# Print the final amount rounded to two decimal places.
print(round(Qf, 2))
<|reserved_special_token_1|>
# Compound-interest calculator: reads the per-period rate from stdin and
# prints the final amount after 36 periods, rounded to two decimals.
rate = float(input("juros"))
principal = 1500
periods = 36
final_amount = principal * (1 + rate) ** periods
print(round(final_amount, 2))
|
flexible
|
{
"blob_id": "700d6e0c7dab58ed0157265ff78021923c17e397",
"index": 5619,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(round(Qf, 2))\n",
"step-3": "j = float(input('juros'))\nQ0 = 1500\nt = 36\nQf = Q0 * (1 + j) ** t\nprint(round(Qf, 2))\n",
"step-4": "j= float(input(\"juros\"))\nQ0= 1500\nt= 36\nQf=Q0*(1+j)**t\nprint(round(Qf,2))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@runnable
class RoleMixin(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _add_service(self, cls, *args, **kwargs):
pipe, peer = zpipe(self.ctx)
service = cls(peer, self.__endpoint, self.__uuid, self.
__config_service, *args, **kwargs)
self.__services[pipe] = service
if Configuration == cls:
self.__config_service = service
<|reserved_special_token_0|>
def play(self):
for service in self.__services.values():
service.start()
self.run_state()
self.logger.debug('waiting for control commands')
while self.in_running_state:
try:
msg = self.__recv_control()
if 'STOP' == msg:
self.__send_control_str('OKAY')
self.logger.debug('received STOP control command')
self.stop_state()
break
else:
self.__send_control_str('WTF')
self.logger.error('unknown control command: %s' % msg)
except ZMQError as e:
if e.errno == ETERM:
self.logger.debug('received ETERM')
self.error_state()
break
else:
raise
except KeyboardInterrupt:
self.logger.debug('received KeyboardInterrupt')
self.stop_state()
break
return self.__cleanup()
<|reserved_special_token_0|>
def __stop(self):
""" try to stop all of this Role's services """
poller = Poller()
for pipe, svc in self.__services.items():
pipe.send_string('STOP')
self.logger.debug('sent STOP command to %s service' % svc)
poller.register(pipe, POLLIN)
sleep(1)
max_attempts = len(self.__services)
attempts = 0
while self.__some_alive() and attempts < max_attempts:
attempts += 1
items = dict(poller.poll(60000))
alive = dict(self.__services)
for pipe, svc in alive.items():
if pipe in items:
reply = pipe.recv_string()
if 'STOPPED' == reply:
self.logger.debug(
'received STOPPED control reply from %s service' %
svc)
svc.join(timeout=5)
if svc.is_alive():
self.logger.error(
'%s service is still alive; not waiting' % svc)
else:
self.logger.debug('%s service thread stopped' % svc
)
poller.unregister(pipe)
pipe.close()
del self.__services[pipe]
else:
self.logger.debug('unknown control reply: %s' % reply)
if len(self.__services) > 0:
msg = '%s services still alive after %d cycles; ' % ([str(s
) for s in self.__services.values()], attempts)
if attempts < max_attempts:
msg += 'waiting'
else:
msg += 'giving up'
self.logger.debug(msg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@runnable
class RoleMixin(object):
<|reserved_special_token_0|>
def __str__(self):
return self.__class__.__name__
def __send_control_str(self, message):
self.__control_pipe.send_string(message)
def __recv_control(self):
return self.__control_pipe.recv_string()
def get_config_service(self):
return self.__config_service
def get_config_service_kvdict(self):
assert self.__config_service is not None
return self.__config_service.copy_kvdict()
def _add_service(self, cls, *args, **kwargs):
pipe, peer = zpipe(self.ctx)
service = cls(peer, self.__endpoint, self.__uuid, self.
__config_service, *args, **kwargs)
self.__services[pipe] = service
if Configuration == cls:
self.__config_service = service
<|reserved_special_token_0|>
def play(self):
for service in self.__services.values():
service.start()
self.run_state()
self.logger.debug('waiting for control commands')
while self.in_running_state:
try:
msg = self.__recv_control()
if 'STOP' == msg:
self.__send_control_str('OKAY')
self.logger.debug('received STOP control command')
self.stop_state()
break
else:
self.__send_control_str('WTF')
self.logger.error('unknown control command: %s' % msg)
except ZMQError as e:
if e.errno == ETERM:
self.logger.debug('received ETERM')
self.error_state()
break
else:
raise
except KeyboardInterrupt:
self.logger.debug('received KeyboardInterrupt')
self.stop_state()
break
return self.__cleanup()
def __cleanup(self):
if not self.in_errored_state:
self.__stop()
for pipe in self.__services:
pipe.close()
del self.__services
self.__control_pipe.close()
del self.__control_pipe
self.logger.debug('role cleanup finished; exiting')
def __stop(self):
""" try to stop all of this Role's services """
poller = Poller()
for pipe, svc in self.__services.items():
pipe.send_string('STOP')
self.logger.debug('sent STOP command to %s service' % svc)
poller.register(pipe, POLLIN)
sleep(1)
max_attempts = len(self.__services)
attempts = 0
while self.__some_alive() and attempts < max_attempts:
attempts += 1
items = dict(poller.poll(60000))
alive = dict(self.__services)
for pipe, svc in alive.items():
if pipe in items:
reply = pipe.recv_string()
if 'STOPPED' == reply:
self.logger.debug(
'received STOPPED control reply from %s service' %
svc)
svc.join(timeout=5)
if svc.is_alive():
self.logger.error(
'%s service is still alive; not waiting' % svc)
else:
self.logger.debug('%s service thread stopped' % svc
)
poller.unregister(pipe)
pipe.close()
del self.__services[pipe]
else:
self.logger.debug('unknown control reply: %s' % reply)
if len(self.__services) > 0:
msg = '%s services still alive after %d cycles; ' % ([str(s
) for s in self.__services.values()], attempts)
if attempts < max_attempts:
msg += 'waiting'
else:
msg += 'giving up'
self.logger.debug(msg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@runnable
class RoleMixin(object):
<|reserved_special_token_0|>
def __str__(self):
return self.__class__.__name__
def __send_control_str(self, message):
self.__control_pipe.send_string(message)
def __recv_control(self):
return self.__control_pipe.recv_string()
def get_config_service(self):
return self.__config_service
def get_config_service_kvdict(self):
assert self.__config_service is not None
return self.__config_service.copy_kvdict()
def _add_service(self, cls, *args, **kwargs):
pipe, peer = zpipe(self.ctx)
service = cls(peer, self.__endpoint, self.__uuid, self.
__config_service, *args, **kwargs)
self.__services[pipe] = service
if Configuration == cls:
self.__config_service = service
def sos(self):
SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)
def play(self):
for service in self.__services.values():
service.start()
self.run_state()
self.logger.debug('waiting for control commands')
while self.in_running_state:
try:
msg = self.__recv_control()
if 'STOP' == msg:
self.__send_control_str('OKAY')
self.logger.debug('received STOP control command')
self.stop_state()
break
else:
self.__send_control_str('WTF')
self.logger.error('unknown control command: %s' % msg)
except ZMQError as e:
if e.errno == ETERM:
self.logger.debug('received ETERM')
self.error_state()
break
else:
raise
except KeyboardInterrupt:
self.logger.debug('received KeyboardInterrupt')
self.stop_state()
break
return self.__cleanup()
def __cleanup(self):
if not self.in_errored_state:
self.__stop()
for pipe in self.__services:
pipe.close()
del self.__services
self.__control_pipe.close()
del self.__control_pipe
self.logger.debug('role cleanup finished; exiting')
def __stop(self):
""" try to stop all of this Role's services """
poller = Poller()
for pipe, svc in self.__services.items():
pipe.send_string('STOP')
self.logger.debug('sent STOP command to %s service' % svc)
poller.register(pipe, POLLIN)
sleep(1)
max_attempts = len(self.__services)
attempts = 0
while self.__some_alive() and attempts < max_attempts:
attempts += 1
items = dict(poller.poll(60000))
alive = dict(self.__services)
for pipe, svc in alive.items():
if pipe in items:
reply = pipe.recv_string()
if 'STOPPED' == reply:
self.logger.debug(
'received STOPPED control reply from %s service' %
svc)
svc.join(timeout=5)
if svc.is_alive():
self.logger.error(
'%s service is still alive; not waiting' % svc)
else:
self.logger.debug('%s service thread stopped' % svc
)
poller.unregister(pipe)
pipe.close()
del self.__services[pipe]
else:
self.logger.debug('unknown control reply: %s' % reply)
if len(self.__services) > 0:
msg = '%s services still alive after %d cycles; ' % ([str(s
) for s in self.__services.values()], attempts)
if attempts < max_attempts:
msg += 'waiting'
else:
msg += 'giving up'
self.logger.debug(msg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from logging import getLogger
from time import sleep
from uuid import UUID
from zmq import Context, Poller, POLLIN, ZMQError, ETERM
from zhelpers import zpipe
from dcamp.service.configuration import Configuration
from dcamp.types.messages.control import SOS
from dcamp.types.specs import EndpntSpec
from dcamp.util.decorators import runnable
@runnable
class RoleMixin(object):
    """Base mixin for a dcamp role that owns a set of service threads.

    Each service is reachable over a zmq pipe socket; the role relays
    control commands (STOP, SOS) between its caller and those services.
    NOTE(review): the run_state()/stop_state()/error_state() and
    in_running_state/in_errored_state members used below are presumably
    supplied by the @runnable decorator -- confirm in dcamp.util.decorators.
    """
    def __init__(self, pipe, ep, uuid):
        # pipe: zmq control socket shared with our caller
        self.ctx = Context.instance()
        self.__control_pipe = pipe
        assert isinstance(ep, EndpntSpec)
        self.__endpoint = ep
        assert isinstance(uuid, UUID)
        self.__uuid = uuid
        # set by _add_service() when a Configuration service is registered
        self.__config_service = None
        self.logger = getLogger('dcamp.role.%s' % self)
        # { control-pipe socket: service instance, ... }
        self.__services = {}
    def __str__(self):
        # role name as used in logger names and log messages
        return self.__class__.__name__
    def __send_control_str(self, message):
        # send a control reply back to our caller
        self.__control_pipe.send_string(message)
    def __recv_control(self):
        # block until the caller sends a control command
        return self.__control_pipe.recv_string()
    def get_config_service(self):
        """Return the Configuration service instance (None until one is added)."""
        return self.__config_service
    def get_config_service_kvdict(self):
        """Return a copy of the Configuration service's key-value dict."""
        assert self.__config_service is not None
        return self.__config_service.copy_kvdict()
    def _add_service(self, cls, *args, **kwargs):
        """Create a cls service connected to this role via a zmq pipe pair."""
        pipe, peer = zpipe(self.ctx)
        # the service gets the peer end plus our endpoint/uuid/config context
        service = cls(peer, self.__endpoint, self.__uuid, self.
            __config_service, *args, **kwargs)
        self.__services[pipe] = service
        # remember the Configuration service for services added later
        if Configuration == cls:
            self.__config_service = service
    def sos(self):
        """Send an SOS control message up to our caller."""
        SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)
    def play(self):
        """Start all services, then process control commands until stopped.

        Returns the result of the internal cleanup (which closes pipes and
        stops the services).
        """
        for service in self.__services.values():
            service.start()
        self.run_state()
        self.logger.debug('waiting for control commands')
        while self.in_running_state:
            try:
                msg = self.__recv_control()
                if 'STOP' == msg:
                    self.__send_control_str('OKAY')
                    self.logger.debug('received STOP control command')
                    self.stop_state()
                    break
                else:
                    # unrecognized command: reply, log, and keep running
                    self.__send_control_str('WTF')
                    self.logger.error('unknown control command: %s' % msg)
            except ZMQError as e:
                if e.errno == ETERM:
                    # zmq context terminated underneath us; treat as fatal
                    self.logger.debug('received ETERM')
                    self.error_state()
                    break
                else:
                    raise
            except KeyboardInterrupt:
                self.logger.debug('received KeyboardInterrupt')
                self.stop_state()
                break
        return self.__cleanup()
    def __cleanup(self):
        # stop services cleanly unless we errored (e.g. after ETERM)
        if not self.in_errored_state:
            self.__stop()
        # close any service pipes still open, then our own control pipe;
        # the shared zmq context itself is owned (and term()'ed) by the caller
        for pipe in self.__services:
            pipe.close()
        del self.__services
        self.__control_pipe.close()
        del self.__control_pipe
        self.logger.debug('role cleanup finished; exiting')
    def __stop(self):
        """ try to stop all of this Role's services """
        # send a STOP command down every service pipe and register for replies
        poller = Poller()
        for pipe, svc in self.__services.items():
            pipe.send_string('STOP')
            self.logger.debug('sent STOP command to %s service' % svc)
            poller.register(pipe, POLLIN)
        # give services a moment to clean up before polling for replies
        sleep(1)
        max_attempts = len(self.__services)
        attempts = 0
        while self.__some_alive() and attempts < max_attempts:
            attempts += 1
            # wait (up to 60s) for STOPPED replies from the services
            items = dict(poller.poll(60000))
            # iterate over a copy: entries are deleted as services stop
            alive = dict(self.__services)
            for pipe, svc in alive.items():
                if pipe in items:
                    reply = pipe.recv_string()
                    if 'STOPPED' == reply:
                        self.logger.debug(
                            'received STOPPED control reply from %s service' %
                            svc)
                        svc.join(timeout=5)
                        if svc.is_alive():
                            self.logger.error(
                                '%s service is still alive; not waiting' % svc)
                        else:
                            self.logger.debug('%s service thread stopped' % svc
                                )
                        # fully stopped: stop polling and forget the service
                        poller.unregister(pipe)
                        pipe.close()
                        del self.__services[pipe]
                    else:
                        self.logger.debug('unknown control reply: %s' % reply)
        # anything left in __services did not confirm shutdown in time
        if len(self.__services) > 0:
            msg = '%s services still alive after %d cycles; ' % ([str(s
                ) for s in self.__services.values()], attempts)
            if attempts < max_attempts:
                msg += 'waiting'
            else:
                msg += 'giving up'
            self.logger.debug(msg)
    def __some_alive(self):
        """returns True if at least one service of this Role is still running"""
        for service in self.__services.values():
            if service.is_alive():
                return True
        return False
<|reserved_special_token_1|>
from logging import getLogger
from time import sleep
from uuid import UUID
from zmq import Context, Poller, POLLIN, ZMQError, ETERM # pylint: disable-msg=E0611
from zhelpers import zpipe
from dcamp.service.configuration import Configuration
from dcamp.types.messages.control import SOS
from dcamp.types.specs import EndpntSpec
from dcamp.util.decorators import runnable
@runnable
class RoleMixin(object):
    """Base behaviour for a dCAMP role: owns and supervises service threads.

    Each service is reached through its own zmq PAIR pipe (the key in the
    __services dict).  The role relays STOP commands from its caller to the
    services and tears everything down on exit.  Run/stop/error state
    transitions come from the @runnable decorator.
    """
    def __init__(
        self,
        pipe,
        ep,
        uuid,
    ):
        # shared zmq context; term()'ed by the caller, never by this role
        self.ctx = Context.instance()
        # control socket back to whoever is playing this role
        self.__control_pipe = pipe
        assert isinstance(ep, EndpntSpec)
        self.__endpoint = ep
        assert isinstance(uuid, UUID)
        self.__uuid = uuid
        # set by _add_service() once a Configuration service is registered
        self.__config_service = None
        self.logger = getLogger('dcamp.role.%s' % self)
        # { pipe: service, ...}
        self.__services = {}
    def __str__(self):
        return self.__class__.__name__
    def __send_control_str(self, message):
        # reply to the caller over the role's control pipe
        self.__control_pipe.send_string(message)
    def __recv_control(self):
        # blocking read of the next control command from the caller
        return self.__control_pipe.recv_string()
    def get_config_service(self):
        return self.__config_service
    def get_config_service_kvdict(self):
        # only valid after a Configuration service has been added
        assert self.__config_service is not None
        return self.__config_service.copy_kvdict()
    def _add_service(self, cls, *args, **kwargs):
        """Instantiate service class cls and register it with this role."""
        pipe, peer = zpipe(self.ctx)  # create control socket pair
        # create service, passing local values along with rest of given args
        service = cls(peer, self.__endpoint, self.__uuid, self.__config_service, *args, **kwargs)
        self.__services[pipe] = service  # add to our dict, using pipe socket as key
        if Configuration == cls:
            self.__config_service = service
    def sos(self):
        # escalate to the caller via the control pipe
        SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)
    def play(self):
        """Run the role: start all services, then loop on control commands.

        Returns whatever __cleanup() returns once the role leaves the
        running state (STOP command, ETERM, or KeyboardInterrupt).
        """
        # start each service thread
        for service in self.__services.values():
            service.start()
        # @todo: wait for READY message from each service / issue #37
        self.run_state()
        self.logger.debug('waiting for control commands')
        # listen for control commands from caller
        while self.in_running_state:
            try:
                msg = self.__recv_control()
                if 'STOP' == msg:
                    self.__send_control_str('OKAY')
                    self.logger.debug('received STOP control command')
                    self.stop_state()
                    break
                else:
                    self.__send_control_str('WTF')
                    self.logger.error('unknown control command: %s' % msg)
            except ZMQError as e:
                if e.errno == ETERM:
                    # context was terminated under us; treat as fatal
                    self.logger.debug('received ETERM')
                    self.error_state()
                    break
                else:
                    raise
            except KeyboardInterrupt:  # only for roles played by dcamp.App
                self.logger.debug('received KeyboardInterrupt')
                self.stop_state()
                break
        # role is exiting; cleanup
        return self.__cleanup()
    def __cleanup(self):
        # stop our services cleanly (if we can)
        if not self.in_errored_state:
            # @todo: this might raise an exception / issue #38
            self.__stop()
        # shared context; will be term()'ed by caller
        # close all service sockets
        for pipe in self.__services:
            pipe.close()
        del self.__services
        # close our own control pipe
        self.__control_pipe.close()
        del self.__control_pipe
        self.logger.debug('role cleanup finished; exiting')
    def __stop(self):
        """ try to stop all of this Role's services """
        # send commands
        poller = Poller()
        for (pipe, svc) in self.__services.items():
            pipe.send_string('STOP')
            self.logger.debug('sent STOP command to %s service' % svc)
            poller.register(pipe, POLLIN)
        # give services a few seconds to cleanup and exit before checking responses
        sleep(1)
        # one polling cycle per service; each cycle may wait up to 60s
        max_attempts = len(self.__services)
        attempts = 0
        while self.__some_alive() and attempts < max_attempts:
            attempts += 1
            # poll for any replies
            items = dict(poller.poll(60000))  # wait for messages
            # mark responding services as stopped
            alive = dict(self.__services)  # make copy; we delete from __services below
            for (pipe, svc) in alive.items():
                if pipe in items:
                    reply = pipe.recv_string()
                    if 'STOPPED' == reply:
                        self.logger.debug('received STOPPED control reply from %s service' % svc)
                        svc.join(timeout=5)  # STOPPED response should be sent right before svc exit
                        if svc.is_alive():
                            self.logger.error('%s service is still alive; not waiting' % svc)
                        else:
                            self.logger.debug('%s service thread stopped' % svc)
                        poller.unregister(pipe)
                        pipe.close()
                        del (self.__services[pipe])
                    else:
                        self.logger.debug('unknown control reply: %s' % reply)
        # log some useful info
        if len(self.__services) > 0:
            msg = '%s services still alive after %d cycles; ' % (
                [str(s) for s in self.__services.values()], attempts)
            if attempts < max_attempts:
                msg += 'waiting'
            else:
                msg += 'giving up'
            self.logger.debug(msg)
    def __some_alive(self):
        """returns True if at least one service of this Role is still running"""
        for service in self.__services.values():
            if service.is_alive():
                return True
        return False
|
flexible
|
{
"blob_id": "fee757b91f8c2ca1c105d7e67636772a8b5eafd5",
"index": 8158,
"step-1": "<mask token>\n\n\n@runnable\nclass RoleMixin(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n <mask token>\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n <mask token>\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % 
svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n <mask token>\n",
"step-2": "<mask token>\n\n\n@runnable\nclass RoleMixin(object):\n <mask token>\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n <mask token>\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n\n def __cleanup(self):\n if not self.in_errored_state:\n self.__stop()\n for pipe in self.__services:\n pipe.close()\n del self.__services\n self.__control_pipe.close()\n del self.__control_pipe\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, 
POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n <mask token>\n",
"step-3": "<mask token>\n\n\n@runnable\nclass RoleMixin(object):\n <mask token>\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n\n def sos(self):\n SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n\n def __cleanup(self):\n if not self.in_errored_state:\n self.__stop()\n for pipe in self.__services:\n pipe.close()\n del self.__services\n self.__control_pipe.close()\n del self.__control_pipe\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n 
self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n <mask token>\n",
"step-4": "from logging import getLogger\nfrom time import sleep\nfrom uuid import UUID\nfrom zmq import Context, Poller, POLLIN, ZMQError, ETERM\nfrom zhelpers import zpipe\nfrom dcamp.service.configuration import Configuration\nfrom dcamp.types.messages.control import SOS\nfrom dcamp.types.specs import EndpntSpec\nfrom dcamp.util.decorators import runnable\n\n\n@runnable\nclass RoleMixin(object):\n\n def __init__(self, pipe, ep, uuid):\n self.ctx = Context.instance()\n self.__control_pipe = pipe\n assert isinstance(ep, EndpntSpec)\n self.__endpoint = ep\n assert isinstance(uuid, UUID)\n self.__uuid = uuid\n self.__config_service = None\n self.logger = getLogger('dcamp.role.%s' % self)\n self.__services = {}\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n\n def sos(self):\n SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n 
self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n\n def __cleanup(self):\n if not self.in_errored_state:\n self.__stop()\n for pipe in self.__services:\n pipe.close()\n del self.__services\n self.__control_pipe.close()\n del self.__control_pipe\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n\n def __some_alive(self):\n \"\"\"returns True if at least one service of this Role is still running\"\"\"\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False\n",
"step-5": "from logging import getLogger\nfrom time import sleep\nfrom uuid import UUID\n\nfrom zmq import Context, Poller, POLLIN, ZMQError, ETERM # pylint: disable-msg=E0611\nfrom zhelpers import zpipe\n\nfrom dcamp.service.configuration import Configuration\nfrom dcamp.types.messages.control import SOS\nfrom dcamp.types.specs import EndpntSpec\nfrom dcamp.util.decorators import runnable\n\n\n@runnable\nclass RoleMixin(object):\n def __init__(\n self,\n pipe,\n ep,\n uuid,\n ):\n self.ctx = Context.instance()\n self.__control_pipe = pipe\n\n assert isinstance(ep, EndpntSpec)\n self.__endpoint = ep\n\n assert isinstance(uuid, UUID)\n self.__uuid = uuid\n\n self.__config_service = None\n\n self.logger = getLogger('dcamp.role.%s' % self)\n\n # { pipe: service, ...}\n self.__services = {}\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx) # create control socket pair\n # create service, passing local values along with rest of given args\n service = cls(peer, self.__endpoint, self.__uuid, self.__config_service, *args, **kwargs)\n self.__services[pipe] = service # add to our dict, using pipe socket as key\n if Configuration == cls:\n self.__config_service = service\n\n def sos(self):\n SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)\n\n def play(self):\n # start each service thread\n for service in self.__services.values():\n service.start()\n\n # @todo: wait for READY message from each service / issue #37\n\n self.run_state()\n self.logger.debug('waiting for control commands')\n\n # listen for control commands from caller\n 
while self.in_running_state:\n try:\n msg = self.__recv_control()\n\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt: # only for roles played by dcamp.App\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n\n # role is exiting; cleanup\n return self.__cleanup()\n\n def __cleanup(self):\n # stop our services cleanly (if we can)\n if not self.in_errored_state:\n # @todo: this might raise an exception / issue #38\n self.__stop()\n\n # shared context; will be term()'ed by caller\n\n # close all service sockets\n for pipe in self.__services:\n pipe.close()\n del self.__services\n\n # close our own control pipe\n self.__control_pipe.close()\n del self.__control_pipe\n\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n\n # send commands\n poller = Poller()\n for (pipe, svc) in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n\n # give services a few seconds to cleanup and exit before checking responses\n sleep(1)\n\n max_attempts = len(self.__services)\n attempts = 0\n\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n\n # poll for any replies\n items = dict(poller.poll(60000)) # wait for messages\n\n # mark responding services as stopped\n alive = dict(self.__services) # make copy\n for (pipe, svc) in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug('received STOPPED control reply from %s service' % svc)\n svc.join(timeout=5) # STOPPED response 
should be sent right before svc exit\n if svc.is_alive():\n self.logger.error('%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc)\n poller.unregister(pipe)\n pipe.close()\n del (self.__services[pipe])\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n\n # log some useful info\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % (\n [str(s) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n\n def __some_alive(self):\n \"\"\"returns True if at least one service of this Role is still running\"\"\"\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False\n",
"step-ids": [
4,
10,
11,
14,
15
]
}
|
[
4,
10,
11,
14,
15
] |
<|reserved_special_token_0|>
class SingleMode(NinePalaceGame):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def player_play(self, i, j):
if not self.game_is_over and not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.dominance)
self.dominance = self.computer
return 1
return 0
def computer_play(self):
if not self.game_is_over:
while 1:
i, j = random.choice(range(3)), random.choice(range(3))
if not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.computer)
self.dominance = self.player
break
<|reserved_special_token_0|>
def reset(self):
super().reset()
self.dominance = self.player
self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
self.main_game_window.withdraw()
self.choose_one_window.update()
self.choose_one_window.deiconify()
def button_function(self, i, j):
if self.player_play(i, j):
self.judge()
self.computer_play()
self.judge()
<|reserved_special_token_0|>
def create_choose_one_window(self):
self.choose_one_window = tk.Toplevel(self.main_game_window)
self.choose_one_window.title('choose one window')
self.choose_one_window.geometry('500x500')
choose_one_window_billboard = tk.StringVar(master=self.
choose_one_window, value='Choose you want')
use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,
height=5, textvariable=choose_one_window_billboard)
use_O_or_X.pack()
use_O = tk.Button(self.choose_one_window, text='I want use O',
width=40, height=5, command=partial(self.set_O_or_X, 'O'))
use_O.pack()
use_X = tk.Button(self.choose_one_window, text='I want use X',
width=40, height=5, command=partial(self.set_O_or_X, 'X'))
use_X.pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SingleMode(NinePalaceGame):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def player_play(self, i, j):
if not self.game_is_over and not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.dominance)
self.dominance = self.computer
return 1
return 0
def computer_play(self):
if not self.game_is_over:
while 1:
i, j = random.choice(range(3)), random.choice(range(3))
if not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.computer)
self.dominance = self.player
break
<|reserved_special_token_0|>
def reset(self):
super().reset()
self.dominance = self.player
self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
self.main_game_window.withdraw()
self.choose_one_window.update()
self.choose_one_window.deiconify()
def button_function(self, i, j):
if self.player_play(i, j):
self.judge()
self.computer_play()
self.judge()
def set_O_or_X(self, use):
self.player = use
if use == 'X':
self.computer = 'O'
self.computer_play()
else:
self.computer = 'X'
self.dominance = self.player
self.choose_one_window.withdraw()
self.main_game_window.update()
self.main_game_window.deiconify()
def create_choose_one_window(self):
self.choose_one_window = tk.Toplevel(self.main_game_window)
self.choose_one_window.title('choose one window')
self.choose_one_window.geometry('500x500')
choose_one_window_billboard = tk.StringVar(master=self.
choose_one_window, value='Choose you want')
use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,
height=5, textvariable=choose_one_window_billboard)
use_O_or_X.pack()
use_O = tk.Button(self.choose_one_window, text='I want use O',
width=40, height=5, command=partial(self.set_O_or_X, 'O'))
use_O.pack()
use_X = tk.Button(self.choose_one_window, text='I want use X',
width=40, height=5, command=partial(self.set_O_or_X, 'X'))
use_X.pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SingleMode(NinePalaceGame):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
self.create_choose_one_window()
super().__init__()
self.main_game_window.mainloop()
def player_play(self, i, j):
if not self.game_is_over and not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.dominance)
self.dominance = self.computer
return 1
return 0
def computer_play(self):
if not self.game_is_over:
while 1:
i, j = random.choice(range(3)), random.choice(range(3))
if not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.computer)
self.dominance = self.player
break
def judge(self):
if self.check_win(self.player):
self.game_is_over = 1
self.billboard_value.set('Player is win!')
elif self.check_win(self.computer):
self.game_is_over = 1
self.billboard_value.set('Computer is win!')
elif self.check_game_over():
self.game_is_over = 1
self.billboard_value.set('Game over!')
def reset(self):
super().reset()
self.dominance = self.player
self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
self.main_game_window.withdraw()
self.choose_one_window.update()
self.choose_one_window.deiconify()
def button_function(self, i, j):
if self.player_play(i, j):
self.judge()
self.computer_play()
self.judge()
def set_O_or_X(self, use):
self.player = use
if use == 'X':
self.computer = 'O'
self.computer_play()
else:
self.computer = 'X'
self.dominance = self.player
self.choose_one_window.withdraw()
self.main_game_window.update()
self.main_game_window.deiconify()
def create_choose_one_window(self):
self.choose_one_window = tk.Toplevel(self.main_game_window)
self.choose_one_window.title('choose one window')
self.choose_one_window.geometry('500x500')
choose_one_window_billboard = tk.StringVar(master=self.
choose_one_window, value='Choose you want')
use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,
height=5, textvariable=choose_one_window_billboard)
use_O_or_X.pack()
use_O = tk.Button(self.choose_one_window, text='I want use O',
width=40, height=5, command=partial(self.set_O_or_X, 'O'))
use_O.pack()
use_X = tk.Button(self.choose_one_window, text='I want use X',
width=40, height=5, command=partial(self.set_O_or_X, 'X'))
use_X.pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SingleMode(NinePalaceGame):
player1 = player = 'O'
player2 = computer = 'X'
def __init__(self):
self.create_choose_one_window()
super().__init__()
self.main_game_window.mainloop()
def player_play(self, i, j):
if not self.game_is_over and not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.dominance)
self.dominance = self.computer
return 1
return 0
def computer_play(self):
if not self.game_is_over:
while 1:
i, j = random.choice(range(3)), random.choice(range(3))
if not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.computer)
self.dominance = self.player
break
def judge(self):
if self.check_win(self.player):
self.game_is_over = 1
self.billboard_value.set('Player is win!')
elif self.check_win(self.computer):
self.game_is_over = 1
self.billboard_value.set('Computer is win!')
elif self.check_game_over():
self.game_is_over = 1
self.billboard_value.set('Game over!')
def reset(self):
super().reset()
self.dominance = self.player
self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
self.main_game_window.withdraw()
self.choose_one_window.update()
self.choose_one_window.deiconify()
def button_function(self, i, j):
if self.player_play(i, j):
self.judge()
self.computer_play()
self.judge()
def set_O_or_X(self, use):
self.player = use
if use == 'X':
self.computer = 'O'
self.computer_play()
else:
self.computer = 'X'
self.dominance = self.player
self.choose_one_window.withdraw()
self.main_game_window.update()
self.main_game_window.deiconify()
def create_choose_one_window(self):
self.choose_one_window = tk.Toplevel(self.main_game_window)
self.choose_one_window.title('choose one window')
self.choose_one_window.geometry('500x500')
choose_one_window_billboard = tk.StringVar(master=self.
choose_one_window, value='Choose you want')
use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,
height=5, textvariable=choose_one_window_billboard)
use_O_or_X.pack()
use_O = tk.Button(self.choose_one_window, text='I want use O',
width=40, height=5, command=partial(self.set_O_or_X, 'O'))
use_O.pack()
use_X = tk.Button(self.choose_one_window, text='I want use X',
width=40, height=5, command=partial(self.set_O_or_X, 'X'))
use_X.pack()
if __name__ == '__main__':
game = SingleMode()
<|reserved_special_token_1|>
import tkinter as tk
from functools import partial
from numpy import random
from base import NinePalaceGame
class SingleMode(NinePalaceGame):
    """Single-player tic-tac-toe: a human plays against a random-move computer."""

    # Class-level defaults; set_O_or_X() overrides them per instance.
    player1 = player = 'O'
    player2 = computer = 'X'

    def __init__(self):
        """Show the mark-selection window first, then enter the Tk main loop."""
        self.create_choose_one_window()
        super().__init__()
        self.main_game_window.mainloop()

    def player_play(self, i, j):
        """Place the player's mark at (i, j); return 1 on success, 0 otherwise."""
        if self.game_is_over or self.box[i][j]:
            return 0
        self.box[i][j] = 1
        self.value_group[i][j].set(self.dominance)
        self.dominance = self.computer
        return 1

    def computer_play(self):
        """Let the computer claim a random empty cell (no-op once the game ended)."""
        if self.game_is_over:
            return
        while True:
            row, col = random.choice(range(3)), random.choice(range(3))
            if self.box[row][col]:
                continue
            self.box[row][col] = 1
            self.value_group[row][col].set(self.computer)
            self.dominance = self.player
            return

    def judge(self):
        """End the game and update the billboard on a win or a full board."""
        if self.check_win(self.player):
            outcome = 'Player is win!'
        elif self.check_win(self.computer):
            outcome = 'Computer is win!'
        elif self.check_game_over():
            outcome = 'Game over!'
        else:
            return
        self.game_is_over = 1
        self.billboard_value.set(outcome)

    def reset(self):
        """Clear the board and bring the mark-selection window back up."""
        super().reset()
        self.dominance = self.player
        self.box = [[0 for _ in range(3)] for _ in range(3)]
        self.main_game_window.withdraw()
        self.choose_one_window.update()
        self.choose_one_window.deiconify()

    def button_function(self, i, j):
        """Handle a board-button press: player's move, then the computer's reply."""
        if not self.player_play(i, j):
            return
        self.judge()
        self.computer_play()
        self.judge()

    def set_O_or_X(self, use):
        """Record the player's chosen mark and switch to the main game window."""
        self.player = use
        if use == 'X':
            self.computer = 'O'
            # Player took X, so the computer (playing O) opens the game.
            self.computer_play()
        else:
            self.computer = 'X'
        self.dominance = self.player
        self.choose_one_window.withdraw()
        self.main_game_window.update()
        self.main_game_window.deiconify()

    def create_choose_one_window(self):
        """Build the Toplevel window where the player picks O or X."""
        window = tk.Toplevel(self.main_game_window)
        self.choose_one_window = window
        window.title('choose one window')
        window.geometry('500x500')
        banner_text = tk.StringVar(master=window, value='Choose you want')
        banner = tk.Label(window, bg='yellow', width=50, height=5,
                          textvariable=banner_text)
        banner.pack()
        for mark in ('O', 'X'):
            tk.Button(window, text='I want use %s' % mark, width=40, height=5,
                      command=partial(self.set_O_or_X, mark)).pack()
if __name__ == '__main__':
game = SingleMode()
|
flexible
|
{
"blob_id": "841743d4e9d683827962d83a77a87c6432842add",
"index": 8013,
"step-1": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n <mask token>\n <mask token>\n <mask token>\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n <mask token>\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n <mask token>\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n <mask token>\n <mask token>\n <mask token>\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n <mask token>\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n <mask token>\n <mask token>\n\n def __init__(self):\n self.create_choose_one_window()\n super().__init__()\n self.main_game_window.mainloop()\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n\n def judge(self):\n if self.check_win(self.player):\n self.game_is_over = 1\n self.billboard_value.set('Player is win!')\n elif self.check_win(self.computer):\n self.game_is_over = 1\n self.billboard_value.set('Computer is win!')\n elif self.check_game_over():\n self.game_is_over = 1\n self.billboard_value.set('Game over!')\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n 
height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n player1 = player = 'O'\n player2 = computer = 'X'\n\n def __init__(self):\n self.create_choose_one_window()\n super().__init__()\n self.main_game_window.mainloop()\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n\n def judge(self):\n if self.check_win(self.player):\n self.game_is_over = 1\n self.billboard_value.set('Player is win!')\n elif self.check_win(self.computer):\n self.game_is_over = 1\n self.billboard_value.set('Computer is win!')\n elif self.check_game_over():\n self.game_is_over = 1\n self.billboard_value.set('Game over!')\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, 
bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\nif __name__ == '__main__':\n game = SingleMode()\n",
"step-5": "import tkinter as tk\nfrom functools import partial\nfrom numpy import random\nfrom base import NinePalaceGame\n\n\nclass SingleMode(NinePalaceGame):\n player1 = player = 'O'\n player2 = computer = 'X'\n\n def __init__(self):\n self.create_choose_one_window()\n super().__init__()\n\n self.main_game_window.mainloop()\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n\n def judge(self):\n if self.check_win(self.player):\n self.game_is_over = 1\n self.billboard_value.set('Player is win!')\n elif self.check_win(self.computer):\n self.game_is_over = 1\n self.billboard_value.set('Computer is win!')\n elif self.check_game_over():\n self.game_is_over = 1\n self.billboard_value.set('Game over!')\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [\n [0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n\n choose_one_window_billboard = tk.StringVar(\n 
master=self.choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n\n use_O = tk.Button(self.choose_one_window, text='I want use O', width=40,\n height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X', width=40,\n height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\nif __name__ == '__main__':\n game = SingleMode()\n",
"step-ids": [
6,
7,
9,
11,
13
]
}
|
[
6,
7,
9,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GF_AVCConfigSlot(Structure):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GF_AVCConfigSlot(Structure):
_fields_ = [('size', c_uint16), ('data', c_char), ('id', int)]
<|reserved_special_token_1|>
from ctypes import Structure, c_uint16, c_char, c_int


class GF_AVCConfigSlot(Structure):
    """ctypes mirror of a GPAC AVC configuration slot record.

    Fields
    ------
    size : c_uint16  -- payload size in bytes
    data : c_char    -- single payload byte
                        # NOTE(review): for a byte buffer this would need
                        # POINTER(c_char); confirm against the C struct.
    id   : c_int     -- slot identifier
    """
    # Original code used Python's ``int`` as the field type, which raises
    # ``TypeError: second item in _fields_ pair must be a C type`` at class
    # creation time; a ctypes type (c_int) is required.
    _fields_ = [('size', c_uint16), ('data', c_char), ('id', c_int)]
<|reserved_special_token_1|>
from ctypes import Structure, c_uint16, c_char, c_int


class GF_AVCConfigSlot(Structure):
    """ctypes mirror of a GPAC AVC configuration slot record.

    Fields
    ------
    size : c_uint16  -- payload size in bytes
    data : c_char    -- single payload byte
                        # NOTE(review): for a byte buffer this would need
                        # POINTER(c_char); confirm against the C struct.
    id   : c_int     -- slot identifier
    """
    # Original code used Python's ``int`` as the field type, which raises
    # ``TypeError: second item in _fields_ pair must be a C type`` at class
    # creation time; a ctypes type (c_int) is required.
    _fields_ = [
        ("size", c_uint16),
        ("data", c_char),
        ("id", c_int),
    ]
|
flexible
|
{
"blob_id": "f3b194bbc3c174549b64d6e6b1a8f4438a0c9d38",
"index": 4791,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GF_AVCConfigSlot(Structure):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GF_AVCConfigSlot(Structure):\n _fields_ = [('size', c_uint16), ('data', c_char), ('id', int)]\n",
"step-4": "from ctypes import *\n\n\nclass GF_AVCConfigSlot(Structure):\n _fields_ = [('size', c_uint16), ('data', c_char), ('id', int)]\n",
"step-5": "from ctypes import *\n\n\nclass GF_AVCConfigSlot(Structure):\n _fields_=[\n (\"size\", c_uint16),\n (\"data\", c_char),\n (\"id\", int)\n ]",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
/Users/jhajhajhajha1/anaconda/lib/python3.6/codecs.py
|
normal
|
{
"blob_id": "0354445d255cc79d3cb9242f82d37e035ff61788",
"index": 2410,
"step-1": "/Users/jhajhajhajha1/anaconda/lib/python3.6/codecs.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Grove_PIR(Pmod_IO):
<|reserved_special_token_0|>
def __init__(self, mb_info, gr_pin):
"""Return a new instance of a PIR object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on pmod-grove adapter.
"""
if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:
raise ValueError('Group number can only be G1 - G2.')
super().__init__(mb_info, gr_pin[0], 'in')
def read(self):
"""Receive the value from the PIR sensor.
Returns 0 when there is no motion, and returns 1 otherwise.
Returns
-------
int
The data (0 or 1) read from the PIR sensor.
"""
return super().read()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Grove_PIR(Pmod_IO):
"""This class controls the PIR motion sensor.
Hardware version: v1.2.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
index : int
The index of the Pmod pin, from 0 to 7.
direction : str
Can only be 'in' for PIR sensor.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of a PIR object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on pmod-grove adapter.
"""
if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:
raise ValueError('Group number can only be G1 - G2.')
super().__init__(mb_info, gr_pin[0], 'in')
def read(self):
"""Receive the value from the PIR sensor.
Returns 0 when there is no motion, and returns 1 otherwise.
Returns
-------
int
The data (0 or 1) read from the PIR sensor.
"""
return super().read()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Yun Rock Qu'
__copyright__ = 'Copyright 2016, Xilinx'
__email__ = 'pynq_support@xilinx.com'
class Grove_PIR(Pmod_IO):
"""This class controls the PIR motion sensor.
Hardware version: v1.2.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
index : int
The index of the Pmod pin, from 0 to 7.
direction : str
Can only be 'in' for PIR sensor.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of a PIR object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on pmod-grove adapter.
"""
if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:
raise ValueError('Group number can only be G1 - G2.')
super().__init__(mb_info, gr_pin[0], 'in')
def read(self):
"""Receive the value from the PIR sensor.
Returns 0 when there is no motion, and returns 1 otherwise.
Returns
-------
int
The data (0 or 1) read from the PIR sensor.
"""
return super().read()
<|reserved_special_token_1|>
from . import Pmod_IO
from . import PMOD_GROVE_G1
from . import PMOD_GROVE_G2
__author__ = 'Yun Rock Qu'
__copyright__ = 'Copyright 2016, Xilinx'
__email__ = 'pynq_support@xilinx.com'
class Grove_PIR(Pmod_IO):
"""This class controls the PIR motion sensor.
Hardware version: v1.2.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
index : int
The index of the Pmod pin, from 0 to 7.
direction : str
Can only be 'in' for PIR sensor.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of a PIR object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on pmod-grove adapter.
"""
if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:
raise ValueError('Group number can only be G1 - G2.')
super().__init__(mb_info, gr_pin[0], 'in')
def read(self):
"""Receive the value from the PIR sensor.
Returns 0 when there is no motion, and returns 1 otherwise.
Returns
-------
int
The data (0 or 1) read from the PIR sensor.
"""
return super().read()
<|reserved_special_token_1|>
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import Pmod_IO
from . import PMOD_GROVE_G1
from . import PMOD_GROVE_G2
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
class Grove_PIR(Pmod_IO):
    """Driver for the Grove PIR motion sensor (hardware v1.2).

    Attributes
    ----------
    microblaze : Pmod
        Microblaze processor instance used by this module.
    index : int
        The index of the Pmod pin, from 0 to 7.
    direction : str
        Can only be 'in' for PIR sensor.

    """

    def __init__(self, mb_info, gr_pin):
        """Create a PIR sensor object on a pmod-grove adapter pin group.

        Parameters
        ----------
        mb_info : dict
            A dictionary storing Microblaze information, such as the
            IP name and the reset name.
        gr_pin: list
            A group of pins on pmod-grove adapter.

        Raises
        ------
        ValueError
            If *gr_pin* is not group G1 or G2.

        """
        # Only the G1/G2 groups route to a usable pin for this sensor.
        valid_groups = (PMOD_GROVE_G1, PMOD_GROVE_G2)
        if gr_pin not in valid_groups:
            raise ValueError("Group number can only be G1 - G2.")
        # First pin of the group carries the digital motion signal.
        super().__init__(mb_info, gr_pin[0], 'in')

    def read(self):
        """Return the current PIR reading.

        Returns
        -------
        int
            0 when no motion is detected, 1 otherwise.

        """
        return super().read()
|
flexible
|
{
"blob_id": "15514d5636471b1a311641a40b6a00b81703cd2b",
"index": 6488,
"step-1": "<mask token>\n\n\nclass Grove_PIR(Pmod_IO):\n <mask token>\n\n def __init__(self, mb_info, gr_pin):\n \"\"\"Return a new instance of a PIR object. \n \n Parameters\n ----------\n mb_info : dict\n A dictionary storing Microblaze information, such as the\n IP name and the reset name.\n gr_pin: list\n A group of pins on pmod-grove adapter.\n \n \"\"\"\n if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:\n raise ValueError('Group number can only be G1 - G2.')\n super().__init__(mb_info, gr_pin[0], 'in')\n\n def read(self):\n \"\"\"Receive the value from the PIR sensor.\n\n Returns 0 when there is no motion, and returns 1 otherwise.\n\n Returns\n -------\n int\n The data (0 or 1) read from the PIR sensor.\n\n \"\"\"\n return super().read()\n",
"step-2": "<mask token>\n\n\nclass Grove_PIR(Pmod_IO):\n \"\"\"This class controls the PIR motion sensor.\n\n Hardware version: v1.2.\n \n Attributes\n ----------\n microblaze : Pmod\n Microblaze processor instance used by this module.\n index : int\n The index of the Pmod pin, from 0 to 7.\n direction : str\n Can only be 'in' for PIR sensor.\n \n \"\"\"\n\n def __init__(self, mb_info, gr_pin):\n \"\"\"Return a new instance of a PIR object. \n \n Parameters\n ----------\n mb_info : dict\n A dictionary storing Microblaze information, such as the\n IP name and the reset name.\n gr_pin: list\n A group of pins on pmod-grove adapter.\n \n \"\"\"\n if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:\n raise ValueError('Group number can only be G1 - G2.')\n super().__init__(mb_info, gr_pin[0], 'in')\n\n def read(self):\n \"\"\"Receive the value from the PIR sensor.\n\n Returns 0 when there is no motion, and returns 1 otherwise.\n\n Returns\n -------\n int\n The data (0 or 1) read from the PIR sensor.\n\n \"\"\"\n return super().read()\n",
"step-3": "<mask token>\n__author__ = 'Yun Rock Qu'\n__copyright__ = 'Copyright 2016, Xilinx'\n__email__ = 'pynq_support@xilinx.com'\n\n\nclass Grove_PIR(Pmod_IO):\n \"\"\"This class controls the PIR motion sensor.\n\n Hardware version: v1.2.\n \n Attributes\n ----------\n microblaze : Pmod\n Microblaze processor instance used by this module.\n index : int\n The index of the Pmod pin, from 0 to 7.\n direction : str\n Can only be 'in' for PIR sensor.\n \n \"\"\"\n\n def __init__(self, mb_info, gr_pin):\n \"\"\"Return a new instance of a PIR object. \n \n Parameters\n ----------\n mb_info : dict\n A dictionary storing Microblaze information, such as the\n IP name and the reset name.\n gr_pin: list\n A group of pins on pmod-grove adapter.\n \n \"\"\"\n if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:\n raise ValueError('Group number can only be G1 - G2.')\n super().__init__(mb_info, gr_pin[0], 'in')\n\n def read(self):\n \"\"\"Receive the value from the PIR sensor.\n\n Returns 0 when there is no motion, and returns 1 otherwise.\n\n Returns\n -------\n int\n The data (0 or 1) read from the PIR sensor.\n\n \"\"\"\n return super().read()\n",
"step-4": "from . import Pmod_IO\nfrom . import PMOD_GROVE_G1\nfrom . import PMOD_GROVE_G2\n__author__ = 'Yun Rock Qu'\n__copyright__ = 'Copyright 2016, Xilinx'\n__email__ = 'pynq_support@xilinx.com'\n\n\nclass Grove_PIR(Pmod_IO):\n \"\"\"This class controls the PIR motion sensor.\n\n Hardware version: v1.2.\n \n Attributes\n ----------\n microblaze : Pmod\n Microblaze processor instance used by this module.\n index : int\n The index of the Pmod pin, from 0 to 7.\n direction : str\n Can only be 'in' for PIR sensor.\n \n \"\"\"\n\n def __init__(self, mb_info, gr_pin):\n \"\"\"Return a new instance of a PIR object. \n \n Parameters\n ----------\n mb_info : dict\n A dictionary storing Microblaze information, such as the\n IP name and the reset name.\n gr_pin: list\n A group of pins on pmod-grove adapter.\n \n \"\"\"\n if gr_pin not in [PMOD_GROVE_G1, PMOD_GROVE_G2]:\n raise ValueError('Group number can only be G1 - G2.')\n super().__init__(mb_info, gr_pin[0], 'in')\n\n def read(self):\n \"\"\"Receive the value from the PIR sensor.\n\n Returns 0 when there is no motion, and returns 1 otherwise.\n\n Returns\n -------\n int\n The data (0 or 1) read from the PIR sensor.\n\n \"\"\"\n return super().read()\n",
"step-5": "# Copyright (c) 2016, Xilinx, Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright \n# notice, this list of conditions and the following disclaimer in the \n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its \n# contributors may be used to endorse or promote products derived from \n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom . import Pmod_IO\nfrom . import PMOD_GROVE_G1\nfrom . 
import PMOD_GROVE_G2\n\n\n__author__ = \"Yun Rock Qu\"\n__copyright__ = \"Copyright 2016, Xilinx\"\n__email__ = \"pynq_support@xilinx.com\"\n\n\nclass Grove_PIR(Pmod_IO):\n \"\"\"This class controls the PIR motion sensor.\n\n Hardware version: v1.2.\n \n Attributes\n ----------\n microblaze : Pmod\n Microblaze processor instance used by this module.\n index : int\n The index of the Pmod pin, from 0 to 7.\n direction : str\n Can only be 'in' for PIR sensor.\n \n \"\"\"\n def __init__(self, mb_info, gr_pin):\n \"\"\"Return a new instance of a PIR object. \n \n Parameters\n ----------\n mb_info : dict\n A dictionary storing Microblaze information, such as the\n IP name and the reset name.\n gr_pin: list\n A group of pins on pmod-grove adapter.\n \n \"\"\"\n if gr_pin not in [PMOD_GROVE_G1,\n PMOD_GROVE_G2]:\n raise ValueError(\"Group number can only be G1 - G2.\")\n\n super().__init__(mb_info, gr_pin[0], 'in')\n\n def read(self):\n \"\"\"Receive the value from the PIR sensor.\n\n Returns 0 when there is no motion, and returns 1 otherwise.\n\n Returns\n -------\n int\n The data (0 or 1) read from the PIR sensor.\n\n \"\"\"\n return super().read()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
app_dist_Tables00.py illustrates use of pitaxcalc-demo release 2.0.0
(India version).
USAGE: python app_dist_Tables00.py
"""
import pandas as pd
from taxcalc import *
import numpy as np
from babel.numbers import format_currency
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# Generate Charts
# first merge the files
#
# Each year has four fixed-width distribution tables on disk:
#   dist-table-all-{clp|ref}-{avg|total}-<year>.txt
# where clp = current-law policy and ref = reform.  The loop reads all
# four for every year, suffixes their data columns with
# _<kind>_<policy>_<year> so they stay distinct after merging, and joins
# everything on 'Income_Bracket'.
START_YEAR = 2017
END_YEAR = 2023
BASE_YEAR = 2019
year = START_YEAR
a={}  # year -> merged per-year DataFrame
for year in range(BASE_YEAR, END_YEAR+1):
    # current-law averages
    filename1='dist-table-all-clp-avg-'+str(year)+'.txt'
    df1 = pd.read_fwf(filename1)
    df1.drop('Unnamed: 0',axis=1,inplace=True)  # drop the saved index column
    col_list = df1.columns[1:] + '_avg_clp_' + str(year)
    col_list = col_list.insert(0, 'Income_Bracket')
    df1.columns = col_list
    # current-law totals
    filename2='dist-table-all-clp-total-'+str(year)+'.txt'
    df2 = pd.read_fwf(filename2)
    df2.drop('Unnamed: 0',axis=1,inplace=True)
    col_list = df2.columns[1:] + '_total_clp_' + str(year)
    col_list = col_list.insert(0, 'Income_Bracket')
    df2.columns = col_list
    a[year] = pd.merge(df1, df2, how="inner", on="Income_Bracket")
    # reform averages
    filename3='dist-table-all-ref-avg-'+str(year)+'.txt'
    df3 = pd.read_fwf(filename3)
    df3.drop('Unnamed: 0',axis=1,inplace=True)
    col_list = df3.columns[1:] + '_avg_ref_' + str(year)
    col_list = col_list.insert(0, 'Income_Bracket')
    df3.columns = col_list
    a[year] = pd.merge(a[year], df3, how="inner", on="Income_Bracket")
    # reform totals
    filename4='dist-table-all-ref-total-'+str(year)+'.txt'
    df4 = pd.read_fwf(filename4)
    df4.drop('Unnamed: 0',axis=1,inplace=True)
    col_list = df4.columns[1:] + '_total_ref_' + str(year)
    col_list = col_list.insert(0, 'Income_Bracket')
    df4.columns = col_list
    a[year] = pd.merge(a[year], df4, how="inner", on="Income_Bracket")
# Chain the per-year frames into one wide table and persist it.
df=a[BASE_YEAR]
for year in range(BASE_YEAR+1, END_YEAR+1):
    df = pd.merge(df, a[year], how="inner", on="Income_Bracket")
df.set_index('Income_Bracket', inplace=True)
df.to_csv('dist-table-all-years.csv', index=True)
df = pd.read_csv('dist-table-all-years.csv')
df.set_index('Income_Bracket', inplace=True)
df_pit_totals_clp = df[df.columns[df.columns.str.startswith('pitax_total_clp')]]
df_pit_totals_ref = df[df.columns[df.columns.str.startswith('pitax_total_ref')]]
clp_pitax_list = df_pit_totals_clp.loc['ALL'].tolist()
clp_pitax_list = [float(i.replace(',','')) for i in clp_pitax_list]
clp_pitax_list = [round(elem, 0) for elem in clp_pitax_list ]
ref_pitax_list = df_pit_totals_ref.loc['ALL'].tolist()
ref_pitax_list = [float(i.replace(',','')) for i in ref_pitax_list]
ref_pitax_list = [round(elem, 0) for elem in ref_pitax_list ]
years = [x[-4:] for x in list(df_pit_totals_clp.columns)]
plt.style.use('seaborn-whitegrid')
fig = plt.figure()
"""
ax = plt.axes()
ax.plot(x, np.sin(x))
ax.set(xlim=(0, 10), ylim=(-2, 2),
xlabel='x', ylabel='sin(x)',
title='A Simple Plot')
"""
#plt.axis([2017, 2021, 150000, 400000])
plt.title("Estimated Tax Collection")
plt.xlabel("Year")
plt.ylabel("Tax Collection in lakh Cr.");
"""
print(year)
print(clp_pitax_list)
print(ref_pitax_list)
"""
plt.plot(years, clp_pitax_list, linestyle='-', marker='o', color='b', label='Current Law', linewidth=2.0)
plt.plot(years, ref_pitax_list, linestyle='--', marker='o', color='r', label='Reform', linewidth=2.0)
plt.legend(loc='best')
plt.savefig('Total_collection_PIT.png')
plt.show()
# generating bar chart for difference in average tax burden due to reform
# for 2020 - the first year of reform
year = 2020
df_pitax_diff = df['pitax_diff_avg_ref_'+str(year)]
# Drop the last row ([:-1], presumably the 'ALL' summary -- verify) and
# the first two bracket rows ([2:]) so only the plotted brackets remain.
df_pitax_diff = df_pitax_diff[:-1]
df_pitax_diff = df_pitax_diff[2:]
df_pitax_diff = df_pitax_diff.reset_index()
pitax_inc_brac_list = df_pitax_diff['Income_Bracket'].tolist()
pitax_diff_list = df_pitax_diff['pitax_diff_avg_ref_'+str(year)].tolist()
# Strip thousands separators before converting to float.
pitax_diff_list = [float(i.replace(',','')) for i in pitax_diff_list]
plt.rcdefaults()
#plt.style.use('seaborn-whitegrid')
fig, ax = plt.subplots(figsize=(8, 5))
# Example data
x_pos = np.arange(len(pitax_inc_brac_list))  # one bar per income bracket
ax.bar(x_pos, pitax_diff_list,
        color='green')
ax.set_xticks(x_pos)
ax.set_xticklabels(pitax_inc_brac_list)
#ax.invert_yaxis()  # labels read top-to-bottom
ax.set_ylabel('Rupees')
ax.set_xlabel('Income Bracket')
# Flip the y-axis so a reduction in tax burden points upward.
ax.invert_yaxis()
ax.set_title('Change in Average Tax Burden Due to Reform in 2020')
plt.savefig('Average Tax Burden Change.png')
plt.show()
# --- Pie chart: each income bracket's share of total PIT under current law ---
# for 2020 - the first year of reform
year = 2020
clp_col = 'pitax_total_clp_' + str(year)
# Individual brackets only: drop the two leading summary rows and the
# trailing 'ALL' aggregate row.
clp_totals = df[clp_col][2:-1].reset_index()
pitax_inc_brac_list_clp = clp_totals['Income_Bracket'].tolist()
# Comma-formatted strings -> float -> nearest integer rupee amounts.
pitax_tot_list_clp = [round(float(v.replace(',', ''))) for v in clp_totals[clp_col]]
fig, ax = plt.subplots(figsize=(10, 10))
# only "explode" the 5th slice (contributing to max revenue)
explode = (0, 0, 0, 0, 0.1, 0, 0, 0, 0)
ax.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,
       autopct='%1.1f%%', shadow=False, startangle=90)
ax.axis('equal')  # equal aspect ratio so the pie is drawn as a circle
plt.suptitle('Contribution by Income Bracket to total PIT in 2020', fontsize=16, fontweight="bold")
ax.set_title('Current Law', fontsize=16, fontweight="bold")
plt.savefig('Contribution to total PIT.png')
plt.show()
# --- Side-by-side pies: bracket contribution to total PIT, law vs. reform ---
# for 2020 - the first year of reform
year = 2020
ref_col = 'pitax_total_ref_' + str(year)
# Individual brackets only: drop the two leading summary rows and the
# trailing 'ALL' aggregate row.
ref_totals = df[ref_col][2:-1].reset_index()
pitax_inc_brac_list = ref_totals['Income_Bracket'].tolist()
# Comma-formatted strings -> float -> nearest integer rupee amounts.
pitax_tot_list = [round(float(v.replace(',', ''))) for v in ref_totals[ref_col]]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
# only "explode" the 5th slice (contributing to max revenue)
explode = (0, 0, 0, 0, 0.1, 0, 0, 0, 0)
plt.suptitle('Contribution by Income Bracket to total PIT in 2020', fontsize=16, fontweight="bold")
# Left pie reuses the current-law shares computed for the previous chart;
# right pie shows the reform shares computed above.
ax1.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,
        autopct='%1.1f%%', shadow=False, startangle=90)
ax1.axis('equal')  # equal aspect ratio so the pie is drawn as a circle
ax2.pie(pitax_tot_list, explode=explode, labels=pitax_inc_brac_list,
        autopct='%1.1f%%', shadow=False, startangle=90)
ax2.axis('equal')  # equal aspect ratio so the pie is drawn as a circle
ax1.set_title('Current Law', fontweight="bold")
ax2.set_title('Reform', fontweight="bold")
plt.savefig('Contribution to total PIT - Before and After Reform.png')
plt.show()
|
normal
|
{
"blob_id": "c3967ab15b8278d958fa5ff6ff48bbfb0b086238",
"index": 3729,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor year in range(BASE_YEAR, END_YEAR + 1):\n filename1 = 'dist-table-all-clp-avg-' + str(year) + '.txt'\n df1 = pd.read_fwf(filename1)\n df1.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df1.columns[1:] + '_avg_clp_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df1.columns = col_list\n filename2 = 'dist-table-all-clp-total-' + str(year) + '.txt'\n df2 = pd.read_fwf(filename2)\n df2.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df2.columns[1:] + '_total_clp_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df2.columns = col_list\n a[year] = pd.merge(df1, df2, how='inner', on='Income_Bracket')\n filename3 = 'dist-table-all-ref-avg-' + str(year) + '.txt'\n df3 = pd.read_fwf(filename3)\n df3.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df3.columns[1:] + '_avg_ref_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df3.columns = col_list\n a[year] = pd.merge(a[year], df3, how='inner', on='Income_Bracket')\n filename4 = 'dist-table-all-ref-total-' + str(year) + '.txt'\n df4 = pd.read_fwf(filename4)\n df4.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df4.columns[1:] + '_total_ref_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df4.columns = col_list\n a[year] = pd.merge(a[year], df4, how='inner', on='Income_Bracket')\n<mask token>\nfor year in range(BASE_YEAR + 1, END_YEAR + 1):\n df = pd.merge(df, a[year], how='inner', on='Income_Bracket')\ndf.set_index('Income_Bracket', inplace=True)\ndf.to_csv('dist-table-all-years.csv', index=True)\n<mask token>\ndf.set_index('Income_Bracket', inplace=True)\n<mask token>\nplt.style.use('seaborn-whitegrid')\n<mask token>\nplt.title('Estimated Tax Collection')\nplt.xlabel('Year')\nplt.ylabel('Tax Collection in lakh Cr.')\n<mask token>\nplt.plot(years, clp_pitax_list, linestyle='-', marker='o', color='b', label\n ='Current Law', linewidth=2.0)\nplt.plot(years, ref_pitax_list, linestyle='--', marker='o', 
color='r',\n label='Reform', linewidth=2.0)\nplt.legend(loc='best')\nplt.savefig('Total_collection_PIT.png')\nplt.show()\n<mask token>\nplt.rcdefaults()\n<mask token>\nax.bar(x_pos, pitax_diff_list, color='green')\nax.set_xticks(x_pos)\nax.set_xticklabels(pitax_inc_brac_list)\nax.set_ylabel('Rupees')\nax.set_xlabel('Income Bracket')\nax.invert_yaxis()\nax.set_title('Change in Average Tax Burden Due to Reform in 2020')\nplt.savefig('Average Tax Burden Change.png')\nplt.show()\n<mask token>\nax.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax.axis('equal')\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020',\n fontsize=16, fontweight='bold')\nax.set_title('Current Law', fontsize=16, fontweight='bold')\nplt.savefig('Contribution to total PIT.png')\nplt.show()\n<mask token>\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020',\n fontsize=16, fontweight='bold')\nax1.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax1.axis('equal')\nax2.pie(pitax_tot_list, explode=explode, labels=pitax_inc_brac_list,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax2.axis('equal')\nax1.set_title('Current Law', fontweight='bold')\nax2.set_title('Reform', fontweight='bold')\nplt.savefig('Contribution to total PIT - Before and After Reform.png')\nplt.show()\n",
"step-3": "<mask token>\nSTART_YEAR = 2017\nEND_YEAR = 2023\nBASE_YEAR = 2019\nyear = START_YEAR\na = {}\nfor year in range(BASE_YEAR, END_YEAR + 1):\n filename1 = 'dist-table-all-clp-avg-' + str(year) + '.txt'\n df1 = pd.read_fwf(filename1)\n df1.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df1.columns[1:] + '_avg_clp_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df1.columns = col_list\n filename2 = 'dist-table-all-clp-total-' + str(year) + '.txt'\n df2 = pd.read_fwf(filename2)\n df2.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df2.columns[1:] + '_total_clp_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df2.columns = col_list\n a[year] = pd.merge(df1, df2, how='inner', on='Income_Bracket')\n filename3 = 'dist-table-all-ref-avg-' + str(year) + '.txt'\n df3 = pd.read_fwf(filename3)\n df3.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df3.columns[1:] + '_avg_ref_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df3.columns = col_list\n a[year] = pd.merge(a[year], df3, how='inner', on='Income_Bracket')\n filename4 = 'dist-table-all-ref-total-' + str(year) + '.txt'\n df4 = pd.read_fwf(filename4)\n df4.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df4.columns[1:] + '_total_ref_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df4.columns = col_list\n a[year] = pd.merge(a[year], df4, how='inner', on='Income_Bracket')\ndf = a[BASE_YEAR]\nfor year in range(BASE_YEAR + 1, END_YEAR + 1):\n df = pd.merge(df, a[year], how='inner', on='Income_Bracket')\ndf.set_index('Income_Bracket', inplace=True)\ndf.to_csv('dist-table-all-years.csv', index=True)\ndf = pd.read_csv('dist-table-all-years.csv')\ndf.set_index('Income_Bracket', inplace=True)\ndf_pit_totals_clp = df[df.columns[df.columns.str.startswith('pitax_total_clp')]\n ]\ndf_pit_totals_ref = df[df.columns[df.columns.str.startswith('pitax_total_ref')]\n ]\nclp_pitax_list = df_pit_totals_clp.loc['ALL'].tolist()\nclp_pitax_list 
= [float(i.replace(',', '')) for i in clp_pitax_list]\nclp_pitax_list = [round(elem, 0) for elem in clp_pitax_list]\nref_pitax_list = df_pit_totals_ref.loc['ALL'].tolist()\nref_pitax_list = [float(i.replace(',', '')) for i in ref_pitax_list]\nref_pitax_list = [round(elem, 0) for elem in ref_pitax_list]\nyears = [x[-4:] for x in list(df_pit_totals_clp.columns)]\nplt.style.use('seaborn-whitegrid')\nfig = plt.figure()\n<mask token>\nplt.title('Estimated Tax Collection')\nplt.xlabel('Year')\nplt.ylabel('Tax Collection in lakh Cr.')\n<mask token>\nplt.plot(years, clp_pitax_list, linestyle='-', marker='o', color='b', label\n ='Current Law', linewidth=2.0)\nplt.plot(years, ref_pitax_list, linestyle='--', marker='o', color='r',\n label='Reform', linewidth=2.0)\nplt.legend(loc='best')\nplt.savefig('Total_collection_PIT.png')\nplt.show()\nyear = 2020\ndf_pitax_diff = df['pitax_diff_avg_ref_' + str(year)]\ndf_pitax_diff = df_pitax_diff[:-1]\ndf_pitax_diff = df_pitax_diff[2:]\ndf_pitax_diff = df_pitax_diff.reset_index()\npitax_inc_brac_list = df_pitax_diff['Income_Bracket'].tolist()\npitax_diff_list = df_pitax_diff['pitax_diff_avg_ref_' + str(year)].tolist()\npitax_diff_list = [float(i.replace(',', '')) for i in pitax_diff_list]\nplt.rcdefaults()\nfig, ax = plt.subplots(figsize=(8, 5))\nx_pos = np.arange(len(pitax_inc_brac_list))\nax.bar(x_pos, pitax_diff_list, color='green')\nax.set_xticks(x_pos)\nax.set_xticklabels(pitax_inc_brac_list)\nax.set_ylabel('Rupees')\nax.set_xlabel('Income Bracket')\nax.invert_yaxis()\nax.set_title('Change in Average Tax Burden Due to Reform in 2020')\nplt.savefig('Average Tax Burden Change.png')\nplt.show()\nyear = 2020\ndf_pitax_tot_clp = df['pitax_total_clp_' + str(year)]\ndf_pitax_tot_clp = df_pitax_tot_clp[:-1]\ndf_pitax_tot_clp = df_pitax_tot_clp[2:]\ndf_pitax_tot_clp = df_pitax_tot_clp.reset_index()\npitax_inc_brac_list_clp = df_pitax_tot_clp['Income_Bracket'].tolist()\npitax_tot_list_clp = df_pitax_tot_clp['pitax_total_clp_' + 
str(year)].tolist()\npitax_tot_list_clp = [float(i.replace(',', '')) for i in pitax_tot_list_clp]\npitax_tot_list_clp = [round(elem) for elem in pitax_tot_list_clp]\nfig, ax = plt.subplots(figsize=(10, 10))\nexplode = 0, 0, 0, 0, 0.1, 0, 0, 0, 0\nax.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax.axis('equal')\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020',\n fontsize=16, fontweight='bold')\nax.set_title('Current Law', fontsize=16, fontweight='bold')\nplt.savefig('Contribution to total PIT.png')\nplt.show()\nyear = 2020\ndf_pitax_tot = df['pitax_total_ref_' + str(year)]\ndf_pitax_tot = df_pitax_tot[:-1]\ndf_pitax_tot = df_pitax_tot[2:]\ndf_pitax_tot = df_pitax_tot.reset_index()\npitax_inc_brac_list = df_pitax_tot['Income_Bracket'].tolist()\npitax_tot_list = df_pitax_tot['pitax_total_ref_' + str(year)].tolist()\npitax_tot_list = [float(i.replace(',', '')) for i in pitax_tot_list]\npitax_tot_list = [round(elem) for elem in pitax_tot_list]\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\nexplode = 0, 0, 0, 0, 0.1, 0, 0, 0, 0\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020',\n fontsize=16, fontweight='bold')\nax1.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax1.axis('equal')\nax2.pie(pitax_tot_list, explode=explode, labels=pitax_inc_brac_list,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax2.axis('equal')\nax1.set_title('Current Law', fontweight='bold')\nax2.set_title('Reform', fontweight='bold')\nplt.savefig('Contribution to total PIT - Before and After Reform.png')\nplt.show()\n",
"step-4": "<mask token>\nimport pandas as pd\nfrom taxcalc import *\nimport numpy as np\nfrom babel.numbers import format_currency\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nSTART_YEAR = 2017\nEND_YEAR = 2023\nBASE_YEAR = 2019\nyear = START_YEAR\na = {}\nfor year in range(BASE_YEAR, END_YEAR + 1):\n filename1 = 'dist-table-all-clp-avg-' + str(year) + '.txt'\n df1 = pd.read_fwf(filename1)\n df1.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df1.columns[1:] + '_avg_clp_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df1.columns = col_list\n filename2 = 'dist-table-all-clp-total-' + str(year) + '.txt'\n df2 = pd.read_fwf(filename2)\n df2.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df2.columns[1:] + '_total_clp_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df2.columns = col_list\n a[year] = pd.merge(df1, df2, how='inner', on='Income_Bracket')\n filename3 = 'dist-table-all-ref-avg-' + str(year) + '.txt'\n df3 = pd.read_fwf(filename3)\n df3.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df3.columns[1:] + '_avg_ref_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df3.columns = col_list\n a[year] = pd.merge(a[year], df3, how='inner', on='Income_Bracket')\n filename4 = 'dist-table-all-ref-total-' + str(year) + '.txt'\n df4 = pd.read_fwf(filename4)\n df4.drop('Unnamed: 0', axis=1, inplace=True)\n col_list = df4.columns[1:] + '_total_ref_' + str(year)\n col_list = col_list.insert(0, 'Income_Bracket')\n df4.columns = col_list\n a[year] = pd.merge(a[year], df4, how='inner', on='Income_Bracket')\ndf = a[BASE_YEAR]\nfor year in range(BASE_YEAR + 1, END_YEAR + 1):\n df = pd.merge(df, a[year], how='inner', on='Income_Bracket')\ndf.set_index('Income_Bracket', inplace=True)\ndf.to_csv('dist-table-all-years.csv', index=True)\ndf = pd.read_csv('dist-table-all-years.csv')\ndf.set_index('Income_Bracket', inplace=True)\ndf_pit_totals_clp = 
df[df.columns[df.columns.str.startswith('pitax_total_clp')]\n ]\ndf_pit_totals_ref = df[df.columns[df.columns.str.startswith('pitax_total_ref')]\n ]\nclp_pitax_list = df_pit_totals_clp.loc['ALL'].tolist()\nclp_pitax_list = [float(i.replace(',', '')) for i in clp_pitax_list]\nclp_pitax_list = [round(elem, 0) for elem in clp_pitax_list]\nref_pitax_list = df_pit_totals_ref.loc['ALL'].tolist()\nref_pitax_list = [float(i.replace(',', '')) for i in ref_pitax_list]\nref_pitax_list = [round(elem, 0) for elem in ref_pitax_list]\nyears = [x[-4:] for x in list(df_pit_totals_clp.columns)]\nplt.style.use('seaborn-whitegrid')\nfig = plt.figure()\n<mask token>\nplt.title('Estimated Tax Collection')\nplt.xlabel('Year')\nplt.ylabel('Tax Collection in lakh Cr.')\n<mask token>\nplt.plot(years, clp_pitax_list, linestyle='-', marker='o', color='b', label\n ='Current Law', linewidth=2.0)\nplt.plot(years, ref_pitax_list, linestyle='--', marker='o', color='r',\n label='Reform', linewidth=2.0)\nplt.legend(loc='best')\nplt.savefig('Total_collection_PIT.png')\nplt.show()\nyear = 2020\ndf_pitax_diff = df['pitax_diff_avg_ref_' + str(year)]\ndf_pitax_diff = df_pitax_diff[:-1]\ndf_pitax_diff = df_pitax_diff[2:]\ndf_pitax_diff = df_pitax_diff.reset_index()\npitax_inc_brac_list = df_pitax_diff['Income_Bracket'].tolist()\npitax_diff_list = df_pitax_diff['pitax_diff_avg_ref_' + str(year)].tolist()\npitax_diff_list = [float(i.replace(',', '')) for i in pitax_diff_list]\nplt.rcdefaults()\nfig, ax = plt.subplots(figsize=(8, 5))\nx_pos = np.arange(len(pitax_inc_brac_list))\nax.bar(x_pos, pitax_diff_list, color='green')\nax.set_xticks(x_pos)\nax.set_xticklabels(pitax_inc_brac_list)\nax.set_ylabel('Rupees')\nax.set_xlabel('Income Bracket')\nax.invert_yaxis()\nax.set_title('Change in Average Tax Burden Due to Reform in 2020')\nplt.savefig('Average Tax Burden Change.png')\nplt.show()\nyear = 2020\ndf_pitax_tot_clp = df['pitax_total_clp_' + str(year)]\ndf_pitax_tot_clp = 
df_pitax_tot_clp[:-1]\ndf_pitax_tot_clp = df_pitax_tot_clp[2:]\ndf_pitax_tot_clp = df_pitax_tot_clp.reset_index()\npitax_inc_brac_list_clp = df_pitax_tot_clp['Income_Bracket'].tolist()\npitax_tot_list_clp = df_pitax_tot_clp['pitax_total_clp_' + str(year)].tolist()\npitax_tot_list_clp = [float(i.replace(',', '')) for i in pitax_tot_list_clp]\npitax_tot_list_clp = [round(elem) for elem in pitax_tot_list_clp]\nfig, ax = plt.subplots(figsize=(10, 10))\nexplode = 0, 0, 0, 0, 0.1, 0, 0, 0, 0\nax.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax.axis('equal')\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020',\n fontsize=16, fontweight='bold')\nax.set_title('Current Law', fontsize=16, fontweight='bold')\nplt.savefig('Contribution to total PIT.png')\nplt.show()\nyear = 2020\ndf_pitax_tot = df['pitax_total_ref_' + str(year)]\ndf_pitax_tot = df_pitax_tot[:-1]\ndf_pitax_tot = df_pitax_tot[2:]\ndf_pitax_tot = df_pitax_tot.reset_index()\npitax_inc_brac_list = df_pitax_tot['Income_Bracket'].tolist()\npitax_tot_list = df_pitax_tot['pitax_total_ref_' + str(year)].tolist()\npitax_tot_list = [float(i.replace(',', '')) for i in pitax_tot_list]\npitax_tot_list = [round(elem) for elem in pitax_tot_list]\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\nexplode = 0, 0, 0, 0, 0.1, 0, 0, 0, 0\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020',\n fontsize=16, fontweight='bold')\nax1.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax1.axis('equal')\nax2.pie(pitax_tot_list, explode=explode, labels=pitax_inc_brac_list,\n autopct='%1.1f%%', shadow=False, startangle=90)\nax2.axis('equal')\nax1.set_title('Current Law', fontweight='bold')\nax2.set_title('Reform', fontweight='bold')\nplt.savefig('Contribution to total PIT - Before and After Reform.png')\nplt.show()\n",
"step-5": "\"\"\"\r\napp_dist_Tables00.py illustrates use of pitaxcalc-demo release 2.0.0\r\n(India version).\r\nUSAGE: python app_dist_Tables00.py\r\n\"\"\"\r\nimport pandas as pd\r\nfrom taxcalc import *\r\nimport numpy as np\r\nfrom babel.numbers import format_currency\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.gridspec import GridSpec\r\n\r\n \r\n# Generate Charts\r\n# first merge the files\r\nSTART_YEAR = 2017\r\nEND_YEAR = 2023\r\nBASE_YEAR = 2019\r\nyear = START_YEAR\r\na={}\r\nfor year in range(BASE_YEAR, END_YEAR+1):\r\n filename1='dist-table-all-clp-avg-'+str(year)+'.txt'\r\n df1 = pd.read_fwf(filename1) \r\n df1.drop('Unnamed: 0',axis=1,inplace=True)\r\n col_list = df1.columns[1:] + '_avg_clp_' + str(year)\r\n col_list = col_list.insert(0, 'Income_Bracket')\r\n df1.columns = col_list\r\n filename2='dist-table-all-clp-total-'+str(year)+'.txt'\r\n df2 = pd.read_fwf(filename2) \r\n df2.drop('Unnamed: 0',axis=1,inplace=True)\r\n col_list = df2.columns[1:] + '_total_clp_' + str(year)\r\n col_list = col_list.insert(0, 'Income_Bracket')\r\n df2.columns = col_list\r\n a[year] = pd.merge(df1, df2, how=\"inner\", on=\"Income_Bracket\")\r\n filename3='dist-table-all-ref-avg-'+str(year)+'.txt'\r\n df3 = pd.read_fwf(filename3) \r\n df3.drop('Unnamed: 0',axis=1,inplace=True)\r\n col_list = df3.columns[1:] + '_avg_ref_' + str(year)\r\n col_list = col_list.insert(0, 'Income_Bracket')\r\n df3.columns = col_list\r\n a[year] = pd.merge(a[year], df3, how=\"inner\", on=\"Income_Bracket\")\r\n filename4='dist-table-all-ref-total-'+str(year)+'.txt'\r\n df4 = pd.read_fwf(filename4) \r\n df4.drop('Unnamed: 0',axis=1,inplace=True)\r\n col_list = df4.columns[1:] + '_total_ref_' + str(year)\r\n col_list = col_list.insert(0, 'Income_Bracket')\r\n df4.columns = col_list\r\n a[year] = pd.merge(a[year], df4, how=\"inner\", on=\"Income_Bracket\")\r\n\r\ndf=a[BASE_YEAR]\r\nfor year in range(BASE_YEAR+1, END_YEAR+1):\r\n df = pd.merge(df, a[year], how=\"inner\", 
on=\"Income_Bracket\")\r\n\r\ndf.set_index('Income_Bracket', inplace=True)\r\n\r\ndf.to_csv('dist-table-all-years.csv', index=True)\r\n\r\ndf = pd.read_csv('dist-table-all-years.csv')\r\ndf.set_index('Income_Bracket', inplace=True)\r\ndf_pit_totals_clp = df[df.columns[df.columns.str.startswith('pitax_total_clp')]]\r\ndf_pit_totals_ref = df[df.columns[df.columns.str.startswith('pitax_total_ref')]]\r\nclp_pitax_list = df_pit_totals_clp.loc['ALL'].tolist()\r\nclp_pitax_list = [float(i.replace(',','')) for i in clp_pitax_list]\r\nclp_pitax_list = [round(elem, 0) for elem in clp_pitax_list ]\r\nref_pitax_list = df_pit_totals_ref.loc['ALL'].tolist()\r\nref_pitax_list = [float(i.replace(',','')) for i in ref_pitax_list]\r\nref_pitax_list = [round(elem, 0) for elem in ref_pitax_list ]\r\nyears = [x[-4:] for x in list(df_pit_totals_clp.columns)]\r\n\r\nplt.style.use('seaborn-whitegrid')\r\nfig = plt.figure()\r\n\"\"\"\r\nax = plt.axes()\r\nax.plot(x, np.sin(x))\r\nax.set(xlim=(0, 10), ylim=(-2, 2),\r\n xlabel='x', ylabel='sin(x)',\r\n title='A Simple Plot')\r\n\"\"\"\r\n#plt.axis([2017, 2021, 150000, 400000])\r\nplt.title(\"Estimated Tax Collection\")\r\nplt.xlabel(\"Year\")\r\nplt.ylabel(\"Tax Collection in lakh Cr.\");\r\n\"\"\"\r\nprint(year)\r\nprint(clp_pitax_list)\r\nprint(ref_pitax_list)\r\n\"\"\"\r\nplt.plot(years, clp_pitax_list, linestyle='-', marker='o', color='b', label='Current Law', linewidth=2.0)\r\nplt.plot(years, ref_pitax_list, linestyle='--', marker='o', color='r', label='Reform', linewidth=2.0)\r\nplt.legend(loc='best')\r\nplt.savefig('Total_collection_PIT.png')\r\nplt.show()\r\n\r\n# generating bar chart for difference in average tax burden due to reform \r\n# for 2020 - the first year of reform\r\nyear = 2020\r\ndf_pitax_diff = df['pitax_diff_avg_ref_'+str(year)]\r\ndf_pitax_diff = df_pitax_diff[:-1]\r\ndf_pitax_diff = df_pitax_diff[2:]\r\ndf_pitax_diff = df_pitax_diff.reset_index()\r\npitax_inc_brac_list = 
df_pitax_diff['Income_Bracket'].tolist()\r\npitax_diff_list = df_pitax_diff['pitax_diff_avg_ref_'+str(year)].tolist()\r\n\r\npitax_diff_list = [float(i.replace(',','')) for i in pitax_diff_list]\r\n\r\nplt.rcdefaults()\r\n#plt.style.use('seaborn-whitegrid')\r\nfig, ax = plt.subplots(figsize=(8, 5))\r\n# Example data\r\nx_pos = np.arange(len(pitax_inc_brac_list))\r\nax.bar(x_pos, pitax_diff_list, \r\n color='green')\r\nax.set_xticks(x_pos)\r\nax.set_xticklabels(pitax_inc_brac_list)\r\n#ax.invert_yaxis() # labels read top-to-bottom\r\nax.set_ylabel('Rupees')\r\nax.set_xlabel('Income Bracket')\r\nax.invert_yaxis()\r\nax.set_title('Change in Average Tax Burden Due to Reform in 2020')\r\nplt.savefig('Average Tax Burden Change.png')\r\nplt.show()\r\n\r\n# generating pie chart for contribution of tax by different income groups \r\n# for 2020 - the first year of reform\r\n\r\nyear = 2020\r\ndf_pitax_tot_clp = df['pitax_total_clp_'+str(year)]\r\ndf_pitax_tot_clp = df_pitax_tot_clp[:-1]\r\ndf_pitax_tot_clp = df_pitax_tot_clp[2:]\r\ndf_pitax_tot_clp = df_pitax_tot_clp.reset_index()\r\npitax_inc_brac_list_clp = df_pitax_tot_clp['Income_Bracket'].tolist()\r\npitax_tot_list_clp = df_pitax_tot_clp['pitax_total_clp_'+str(year)].tolist()\r\npitax_tot_list_clp = [float(i.replace(',','')) for i in pitax_tot_list_clp]\r\npitax_tot_list_clp = [round(elem) for elem in pitax_tot_list_clp ]\r\n\r\nfig, ax = plt.subplots(figsize=(10, 10))\r\n# only \"explode\" the 5th slice (contributing to max revenue)\r\nexplode = (0, 0, 0, 0, 0.1, 0, 0, 0, 0) \r\n\r\nax.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp, autopct='%1.1f%%',\r\n shadow=False, startangle=90)\r\nax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\r\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020', fontsize=16, fontweight=\"bold\")\r\nax.set_title('Current Law', fontsize=16, fontweight=\"bold\")\r\nplt.savefig('Contribution to total 
PIT.png')\r\nplt.show()\r\n\r\n# generating pie chart for comparing contribution of tax by \r\n# different income groups for clp and reform for 2020 - the first year of reform\r\n\r\n\r\n\r\nyear = 2020\r\ndf_pitax_tot = df['pitax_total_ref_'+str(year)]\r\ndf_pitax_tot = df_pitax_tot[:-1]\r\ndf_pitax_tot = df_pitax_tot[2:]\r\ndf_pitax_tot = df_pitax_tot.reset_index()\r\npitax_inc_brac_list = df_pitax_tot['Income_Bracket'].tolist()\r\npitax_tot_list = df_pitax_tot['pitax_total_ref_'+str(year)].tolist()\r\npitax_tot_list = [float(i.replace(',','')) for i in pitax_tot_list]\r\npitax_tot_list = [round(elem) for elem in pitax_tot_list ]\r\n\r\n\r\nfig, (ax1, ax2) = plt.subplots(1,2, figsize=(10, 5))\r\n#fig, ax = plt.subplots(figsize=(10, 5))\r\n#the_grid = GridSpec(2, 2)\r\n\r\n# only \"explode\" the 5th slice (contributing to max revenue)\r\nexplode = (0, 0, 0, 0, 0.1, 0, 0, 0, 0) \r\n\r\n#plt.subplot(the_grid[1, 0], aspect=1)\r\nplt.suptitle('Contribution by Income Bracket to total PIT in 2020', fontsize=16, fontweight=\"bold\")\r\nax1.pie(pitax_tot_list_clp, explode=explode, labels=pitax_inc_brac_list_clp, autopct='%1.1f%%',\r\n shadow=False, startangle=90)\r\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\r\n\r\n#plt.subplot(the_grid[0, 1], aspect=1)\r\nax2.pie(pitax_tot_list, explode=explode, labels=pitax_inc_brac_list, autopct='%1.1f%%',\r\n shadow=False, startangle=90)\r\nax2.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\r\nax1.set_title('Current Law', fontweight=\"bold\")\r\nax2.set_title('Reform', fontweight=\"bold\")\r\nplt.savefig('Contribution to total PIT - Before and After Reform.png')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.